drm/i915: Replace 4096 with PAGE_SIZE or I915_GTT_PAGE_SIZE
author    Chris Wilson <chris@chris-wilson.co.uk>    Tue, 10 Jan 2017 14:47:34 +0000 (14:47 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>    Tue, 10 Jan 2017 20:54:32 +0000 (20:54 +0000)
Start converting over from the raw byte count to its semantic macro: in
each case we want either the size of a physical page in main memory or
the size of a virtual page in the GTT. 4096 could mean either, whereas
PAGE_SIZE and I915_GTT_PAGE_SIZE are explicit and should improve both
code comprehension and future changes. In the future we may want to use
variable GTT page sizes, and would then face the challenge of knowing
which hardcoded values represented a physical page and which a virtual
one.
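
As an illustrative sketch (not part of the patch): the two names are
numerically identical today, so the split is purely semantic until the
GTT page size diverges from the CPU page size. The 64K variant in the
comment below is hypothetical.

    /* sketch.c: compile with gcc. Mirrors the in-tree values at the
     * time of this patch: PAGE_SIZE names a CPU page in main memory
     * (normally arch-defined by the kernel), I915_GTT_PAGE_SIZE a page
     * in the GPU's virtual address space. */
    #include <stdio.h>

    #define PAGE_SIZE          4096UL
    #define I915_GTT_PAGE_SIZE 4096UL
    /* With variable GTT page sizes only the GTT constant would change,
     * e.g. (hypothetically) #define I915_GTT_PAGE_SIZE (64UL << 10). */

    int main(void)
    {
        unsigned long backing  = 4 * PAGE_SIZE;          /* CPU pages of backing store */
        unsigned long gtt_span = 4 * I915_GTT_PAGE_SIZE; /* GTT pages it occupies */

        printf("backing=%lu bytes, gtt span=%lu bytes\n", backing, gtt_span);
        return 0;
    }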

v2: Look for a few more 4096s to convert, discover IS_ALIGNED().
v3: Use 4096UL for paranoia, make the fence alignment a distinct value of
4096, and keep the bdw stolen w/a at 4096 until we know better.
v4: Add asserts that i915_vma_insert() start/end are aligned to GTT page
sizes.
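
For reference, a minimal standalone sketch of the open-coded alignment
test these hunks replace versus the IS_ALIGNED() helper found in v2 (the
macro body matches the kernel's; the offsets are made up; compile with
gcc, since typeof is a GNU extension):

    #include <assert.h>
    #include <stdbool.h>

    /* Kernel-style helper: true iff x is a multiple of the
     * power-of-two alignment a. */
    #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)

    int main(void)
    {
        unsigned long node_start = 0x21080; /* hypothetical GTT offset */
        unsigned long alignment  = 0x1000;  /* 4096, one GTT page */

        /* Open-coded form used before this patch... */
        bool misplaced_old = (node_start & (alignment - 1)) != 0;
        /* ...and the equivalent, self-documenting replacement. */
        bool misplaced_new = !IS_ALIGNED(node_start, alignment);

        assert(misplaced_old == misplaced_new); /* both flag 0x21080 as unaligned */
        return 0;
    }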

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/20170110144734.26052-1-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
15 files changed:
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_fence_reg.c
drivers/gpu/drm/i915/i915_gem_fence_reg.h
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_ringbuffer.c

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e5d96de61c14cfe31e3238293721fde7ad19faf1..3bf517e2430a8d6b1cabb88487f8ea28d64e5cb4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3496,7 +3496,7 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
                return;
 
        if (--vma->obj->pin_display == 0)
-               vma->display_alignment = 4096;
+               vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
 
        /* Bump the LRU to try and avoid premature eviction whilst flipping  */
        if (!i915_vma_is_active(vma))
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 40a6939e39565349bf90cca67edc512deab0c522..ed31133b3ce3531f6cd98dd61d43d01c94b127cd 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -97,7 +97,7 @@
  * part. It should be safe to decrease this, but it's more future proof as is.
  */
 #define GEN6_CONTEXT_ALIGN (64<<10)
-#define GEN7_CONTEXT_ALIGN 4096
+#define GEN7_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
 
 static size_t get_context_alignment(struct drm_i915_private *dev_priv)
 {
@@ -341,7 +341,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
        if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
                ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
        else
-               ctx->ggtt_offset_bias = 4096;
+               ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
 
        return ctx;
 
@@ -456,7 +456,8 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
                dev_priv->hw_context_size = 0;
        } else if (HAS_HW_CONTEXTS(dev_priv)) {
                dev_priv->hw_context_size =
-                       round_up(get_context_size(dev_priv), 4096);
+                       round_up(get_context_size(dev_priv),
+                                I915_GTT_PAGE_SIZE);
                if (dev_priv->hw_context_size > (1<<20)) {
                        DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
                                         dev_priv->hw_context_size);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 026ebc5a452a2d76d2a8c29b37f725c23cbd5002..6a5415e31acf18b7570da5177944fcfb9acb9d58 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -264,9 +264,9 @@ int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
        if (check_color) {
                /* Expand search to cover neighbouring guard pages (or lack!) */
                if (start > target->vm->start)
-                       start -= 4096;
+                       start -= I915_GTT_PAGE_SIZE;
                if (end < target->vm->start + target->vm->total)
-                       end += 4096;
+                       end += I915_GTT_PAGE_SIZE;
        }
 
        drm_mm_for_each_node_in_range(node, &target->vm->mm, start, end) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a5fe299da1d366252d100b2829a1b854d784c37f..259fe4aa8d41b0b3a9d5e6e4a6226c360589fd8d 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -438,7 +438,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
                        memset(&cache->node, 0, sizeof(cache->node));
                        ret = drm_mm_insert_node_in_range_generic
                                (&ggtt->base.mm, &cache->node,
-                                4096, 0, I915_COLOR_UNEVICTABLE,
+                                PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
                                 0, ggtt->mappable_end,
                                 DRM_MM_SEARCH_DEFAULT,
                                 DRM_MM_CREATE_DEFAULT);
@@ -851,8 +851,7 @@ eb_vma_misplaced(struct i915_vma *vma)
        WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
                !i915_vma_is_ggtt(vma));
 
-       if (entry->alignment &&
-           vma->node.start & (entry->alignment - 1))
+       if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
                return true;
 
        if (vma->node.size < entry->pad_to_size)
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 9e65696a960c6182962eb641cde512c0d78affd8..fadbe8f4c74553363370b3dca6425429bf50bca6 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -80,11 +80,11 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
                unsigned int stride = i915_gem_object_get_stride(vma->obj);
 
                GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
-               GEM_BUG_ON(vma->node.start & 4095);
-               GEM_BUG_ON(vma->fence_size & 4095);
-               GEM_BUG_ON(stride & 127);
+               GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
+               GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
+               GEM_BUG_ON(!IS_ALIGNED(stride, 128));
 
-               val = (vma->node.start + vma->fence_size - 4096) << 32;
+               val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
                val |= vma->node.start;
                val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
                if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
@@ -127,7 +127,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
                GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
                GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
                GEM_BUG_ON(!is_power_of_2(vma->fence_size));
-               GEM_BUG_ON(vma->node.start & (vma->fence_size - 1));
+               GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
 
                if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
                        stride /= 128;
@@ -166,7 +166,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
                GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
                GEM_BUG_ON(!is_power_of_2(vma->fence_size));
                GEM_BUG_ON(!is_power_of_2(stride / 128));
-               GEM_BUG_ON(vma->node.start & (vma->fence_size - 1));
+               GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
 
                val = vma->node.start;
                if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
index 22c4a2d01adfdaa5e66ff71e6533503000119ec1..99a31ded4dfdfcc8eac68c390be31a76df27feee 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -30,6 +30,8 @@
 struct drm_i915_private;
 struct i915_vma;
 
+#define I965_FENCE_PAGE 4096UL
+
 struct drm_i915_fence_reg {
        struct list_head link;
        struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2d7ab1d35e47085a48f290d99bb56743a129184a..8aca11f5f4467bfdf97123627f885ff89be0a9cf 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -329,7 +329,7 @@ static int __setup_page_dma(struct drm_i915_private *dev_priv,
                return -ENOMEM;
 
        p->daddr = dma_map_page(kdev,
-                               p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+                               p->page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 
        if (dma_mapping_error(kdev, p->daddr)) {
                __free_page(p->page);
@@ -353,7 +353,7 @@ static void cleanup_page_dma(struct drm_i915_private *dev_priv,
        if (WARN_ON(!p->page))
                return;
 
-       dma_unmap_page(&pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+       dma_unmap_page(&pdev->dev, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        __free_page(p->page);
        memset(p, 0, sizeof(*p));
 }
@@ -2711,11 +2711,11 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node,
                                  u64 *end)
 {
        if (node->color != color)
-               *start += 4096;
+               *start += I915_GTT_PAGE_SIZE;
 
        node = list_next_entry(node, node_list);
        if (node->allocated && node->color != color)
-               *end -= 4096;
+               *end -= I915_GTT_PAGE_SIZE;
 }
 
 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
@@ -2742,7 +2742,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
        /* Reserve a mappable slot for our lockless error capture */
        ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
                                                  &ggtt->error_capture,
-                                                 4096, 0,
+                                                 PAGE_SIZE, 0,
                                                  I915_COLOR_UNEVICTABLE,
                                                  0, ggtt->mappable_end,
                                                  0, 0);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 9e91d7e6149c7874898ed70b16e272b8a0149487..34a4fd560fa2478ad42000b6357cf71699db3721 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -40,6 +40,9 @@
 #include "i915_gem_timeline.h"
 #include "i915_gem_request.h"
 
+#define I915_GTT_PAGE_SIZE 4096UL
+#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
+
 #define I915_FENCE_REG_NONE -1
 #define I915_MAX_NUM_FENCES 32
 /* 32 fences + sign bit for FENCE_REG_NONE */
@@ -543,6 +546,6 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 #define PIN_HIGH               BIT(9)
 #define PIN_OFFSET_BIAS                BIT(10)
 #define PIN_OFFSET_FIXED       BIT(11)
-#define PIN_OFFSET_MASK                (~4095)
+#define PIN_OFFSET_MASK                (-I915_GTT_PAGE_SIZE)
 
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 5af19b0bf71366e7471fa5805104a5e47839c629..63ae7e813335a41499aba8d7330dbc1fb274984c 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -187,14 +187,14 @@ int i915_gem_render_state_init(struct intel_engine_cs *engine)
        if (!rodata)
                return 0;
 
-       if (rodata->batch_items * 4 > 4096)
+       if (rodata->batch_items * 4 > PAGE_SIZE)
                return -EINVAL;
 
        so = kmalloc(sizeof(*so), GFP_KERNEL);
        if (!so)
                return -ENOMEM;
 
-       obj = i915_gem_object_create_internal(engine->i915, 4096);
+       obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto err_free;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f1a1d33febcdf56f08c75a6e325ee9e012c3a8ba..e5be8e04bf3b25e36dcd6fc0711f18bccbad9965 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -647,8 +647,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
                        stolen_offset, gtt_offset, size);
 
        /* KISS and expect everything to be page-aligned */
-       if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
-           WARN_ON(stolen_offset & 4095))
+       if (WARN_ON(size == 0) ||
+           WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
+           WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
                return NULL;
 
        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 4f83e331f5981da49a7e173876b11a7f733d13e2..b1361cfd4c5c2e0d6fb7cb1b34c33e87036e0a24 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -82,7 +82,7 @@ u32 i915_gem_fence_size(struct drm_i915_private *i915,
 
        if (INTEL_GEN(i915) >= 4) {
                stride *= i915_gem_tile_height(tiling);
-               GEM_BUG_ON(stride & 4095);
+               GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE));
                return roundup(size, stride);
        }
 
@@ -117,8 +117,11 @@ u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size,
         * Minimum alignment is 4k (GTT page size), but might be greater
         * if a fence register is needed for the object.
         */
-       if (INTEL_GEN(i915) >= 4 || tiling == I915_TILING_NONE)
-               return 4096;
+       if (tiling == I915_TILING_NONE)
+               return I915_GTT_MIN_ALIGNMENT;
+
+       if (INTEL_GEN(i915) >= 4)
+               return I965_FENCE_PAGE;
 
        /*
         * Previous chips need to be aligned to the size of the smallest
@@ -170,7 +173,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
        else
                tile_width = 512;
 
-       if (stride & (tile_width - 1))
+       if (!IS_ALIGNED(stride, tile_width))
                return false;
 
        /* 965+ just needs multiples of tile width */
@@ -195,7 +198,7 @@ static bool i915_vma_fence_prepare(struct i915_vma *vma,
                return false;
 
        alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
-       if (vma->node.start & (alignment - 1))
+       if (!IS_ALIGNED(vma->node.start, alignment))
                return false;
 
        return true;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index f137475fab518b0e3cfb0f026966ca4016b8a35b..490914f8966394e33f9bc5995ed31e228a72412c 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -91,7 +91,7 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
        vma->vm = vm;
        vma->obj = obj;
        vma->size = obj->base.size;
-       vma->display_alignment = 4096;
+       vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
 
        if (view) {
                vma->ggtt_view = *view;
@@ -115,7 +115,7 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
                vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
                                                      i915_gem_object_get_tiling(obj),
                                                      i915_gem_object_get_stride(obj));
-               GEM_BUG_ON(vma->fence_size & 4095);
+               GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
 
                vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
                                                                i915_gem_object_get_tiling(obj),
@@ -270,7 +270,8 @@ i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
        if (vma->node.size < size)
                return true;
 
-       if (alignment && vma->node.start & (alignment - 1))
+       GEM_BUG_ON(alignment && !is_power_of_2(alignment));
+       if (alignment && !IS_ALIGNED(vma->node.start, alignment))
                return true;
 
        if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
@@ -302,7 +303,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
                return;
 
        fenceable = (vma->node.size >= vma->fence_size &&
-                    (vma->node.start & (vma->fence_alignment - 1)) == 0);
+                    IS_ALIGNED(vma->node.start, vma->fence_alignment));
 
        mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
 
@@ -380,13 +381,19 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                                  alignment, vma->fence_alignment);
        }
 
+       GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+       GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
+       GEM_BUG_ON(!is_power_of_2(alignment));
+
        start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+       GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
 
        end = vma->vm->total;
        if (flags & PIN_MAPPABLE)
                end = min_t(u64, end, dev_priv->ggtt.mappable_end);
        if (flags & PIN_ZONE_4G)
-               end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
+               end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
+       GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
 
        /* If binding the object/GGTT view requires more space than the entire
         * aperture has, reject it early before evicting everything in a vain
@@ -406,7 +413,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 
        if (flags & PIN_OFFSET_FIXED) {
                u64 offset = flags & PIN_OFFSET_MASK;
-               if (offset & (alignment - 1) ||
+               if (!IS_ALIGNED(offset, alignment) ||
                    range_overflows(offset, size, end)) {
                        ret = -EINVAL;
                        goto err_unpin;
@@ -440,7 +447,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                 * with zero alignment, so where possible use the optimal
                 * path.
                 */
-               if (alignment <= 4096)
+               if (alignment <= I915_GTT_MIN_ALIGNMENT)
                        alignment = 0;
 
 search_free:
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6db246ad2f13c5ab0d5b52fd73adb495d8158d19..81665a9eb43f550755cf414fc0bf5c0971db8939 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1927,7 +1927,7 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
        engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
        engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
 
-       ret = intel_engine_create_scratch(engine, 4096);
+       ret = intel_engine_create_scratch(engine, PAGE_SIZE);
        if (ret)
                return ret;
 
@@ -2209,7 +2209,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 
        WARN_ON(ce->state);
 
-       context_size = round_up(intel_lr_context_size(engine), 4096);
+       context_size = round_up(intel_lr_context_size(engine),
+                               I915_GTT_PAGE_SIZE);
 
        /* One extra page as the sharing data between driver and GuC */
        context_size += PAGE_SIZE * LRC_PPHWSP_PN;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 01ba36ea125e439a7d457c3a74d94c843d34cf49..0c852c024227d67c3037b6784c3394cecbeb6546 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -26,7 +26,7 @@
 
 #include "intel_ringbuffer.h"
 
-#define GEN8_LR_CONTEXT_ALIGN 4096
+#define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
 
 /* Execlists regs */
 #define RING_ELSP(engine)                      _MMIO((engine)->mmio_base + 0x230)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 0971ac396b6081756ac8bcc11f67845f76b4652e..ab83fc22d2073feef69320015d9eca13054c754a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1736,7 +1736,7 @@ static int init_status_page(struct intel_engine_cs *engine)
        void *vaddr;
        int ret;
 
-       obj = i915_gem_object_create_internal(engine->i915, 4096);
+       obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                DRM_ERROR("Failed to allocate status page\n");
                return PTR_ERR(obj);
@@ -1777,7 +1777,7 @@ static int init_status_page(struct intel_engine_cs *engine)
 
        engine->status_page.vma = vma;
        engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
-       engine->status_page.page_addr = memset(vaddr, 0, 4096);
+       engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
 
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                         engine->name, i915_ggtt_offset(vma));
@@ -2049,7 +2049,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
        }
 
        /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
-       ret = intel_ring_pin(ring, 4096);
+       ret = intel_ring_pin(ring, I915_GTT_PAGE_SIZE);
        if (ret) {
                intel_ring_free(ring);
                goto error;
@@ -2466,7 +2466,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
        if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
                struct i915_vma *vma;
 
-               obj = i915_gem_object_create(dev_priv, 4096);
+               obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
                if (IS_ERR(obj))
                        goto err;
 
@@ -2683,7 +2683,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
                return ret;
 
        if (INTEL_GEN(dev_priv) >= 6) {
-               ret = intel_engine_create_scratch(engine, 4096);
+               ret = intel_engine_create_scratch(engine, PAGE_SIZE);
                if (ret)
                        return ret;
        } else if (HAS_BROKEN_CS_TLB(dev_priv)) {