git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
drm/ttm: move the page_alignment into the BO v2
author: Christian König <christian.koenig@amd.com>
Fri, 5 Feb 2021 15:17:07 +0000 (16:17 +0100)
committer: Christian König <christian.koenig@amd.com>
Fri, 23 Apr 2021 14:23:02 +0000 (16:23 +0200)
The alignment is a constant property and shouldn't change.

v2: move documentation as well as suggested by Matthew.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210413135248.1266-4-christian.koenig@amd.com
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_range_manager.c
drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_resource.h

index b443907afceadfebab03bec153292bff145d5a0a..f1c397be383da6388fcf23d28ad4a847339a9ed6 100644 (file)
@@ -763,7 +763,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                void __user *out = u64_to_user_ptr(args->value);
 
                info.bo_size = robj->tbo.base.size;
-               info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
+               info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
                info.domains = robj->preferred_domains;
                info.domain_flags = robj->flags;
                amdgpu_bo_unreserve(robj);
index 8980329cded0298d42529ac0520a5f9442c97f96..cc48dfa83fe1a9b625d19bedbd22590fc58c7ffc 100644 (file)
@@ -207,7 +207,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 
        spin_lock(&mgr->lock);
        r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
-                                       mem->page_alignment, 0, place->fpfn,
+                                       tbo->page_alignment, 0, place->fpfn,
                                        place->lpfn, DRM_MM_INSERT_BEST);
        spin_unlock(&mgr->lock);
 
index 9ac37569823ff4c7bc9ef22093c4189b6d969bcb..ae4a68db87c09e64aea8fa078f69afbbe56ef7d6 100644 (file)
@@ -184,7 +184,7 @@ static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
 
 static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
 {
-       return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+       return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
 }
 
 /**
index c89b66bb70e2e7c03de3723c304121907029e8d3..a472de7eba3e46b1aca38f9df4076d71c3efaa2c 100644 (file)
@@ -451,7 +451,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
                /* default to 2MB */
                pages_per_node = (2UL << (20UL - PAGE_SHIFT));
 #endif
-               pages_per_node = max((uint32_t)pages_per_node, mem->page_alignment);
+               pages_per_node = max((uint32_t)pages_per_node,
+                                    tbo->page_alignment);
                num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
        }
 
@@ -490,7 +491,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 
        for (; pages_left; ++i) {
                unsigned long pages = min(pages_left, pages_per_node);
-               uint32_t alignment = mem->page_alignment;
+               uint32_t alignment = tbo->page_alignment;
 
                if (pages == pages_per_node)
                        alignment = pages_per_node;
index 9896d8231fe5c57d39977790250717853505f8f4..fd4116bdde0f56b4c1dc59b6ad2e9a12a1b9df91 100644 (file)
@@ -119,7 +119,7 @@ static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
 
 static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
 {
-       return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+       return (bo->tbo.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
 }
 
 /**
index 5b332206c1acecd885b5b107b4284d6baf9c0ef6..df63a07a70de9d91f92e62760c9881a299e36ada 100644 (file)
@@ -903,7 +903,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
        memset(&hop, 0, sizeof(hop));
 
        mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
-       mem.page_alignment = bo->mem.page_alignment;
        mem.bus.offset = 0;
        mem.bus.addr = NULL;
        mem.mm_node = NULL;
@@ -1038,10 +1037,10 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
        INIT_LIST_HEAD(&bo->ddestroy);
        bo->bdev = bdev;
        bo->type = type;
+       bo->page_alignment = page_alignment;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        bo->mem.mm_node = NULL;
-       bo->mem.page_alignment = page_alignment;
        bo->mem.bus.offset = 0;
        bo->mem.bus.addr = NULL;
        bo->moving = NULL;
index b1e3f30f7e2d8ca86f340e188f52d9ba714c1e35..b9d5da6e6a8106caed189164db4ed5c7413860ba 100644 (file)
@@ -79,9 +79,8 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
                mode = DRM_MM_INSERT_HIGH;
 
        spin_lock(&rman->lock);
-       ret = drm_mm_insert_node_in_range(mm, node,
-                                         mem->num_pages,
-                                         mem->page_alignment, 0,
+       ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+                                         bo->page_alignment, 0,
                                          place->fpfn, lpfn, mode);
        spin_unlock(&rman->lock);
 
index eb63cbe64909d705bf38bbc14644d04c160646c3..5ccc35b3194c967d0e0eb4925c1c2905bb32a087 100644 (file)
@@ -28,15 +28,16 @@ static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
 
 static const struct ttm_resource_manager_func vmw_thp_func;
 
-static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
+static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
+                                 struct drm_mm *mm, struct drm_mm_node *node,
                                  unsigned long align_pages,
                                  const struct ttm_place *place,
                                  struct ttm_resource *mem,
                                  unsigned long lpfn,
                                  enum drm_mm_insert_mode mode)
 {
-       if (align_pages >= mem->page_alignment &&
-           (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
+       if (align_pages >= bo->page_alignment &&
+           (!bo->page_alignment || align_pages % bo->page_alignment == 0)) {
                return drm_mm_insert_node_in_range(mm, node,
                                                   mem->num_pages,
                                                   align_pages, 0,
@@ -75,7 +76,7 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
        if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
                align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
                if (mem->num_pages >= align_pages) {
-                       ret = vmw_thp_insert_aligned(mm, node, align_pages,
+                       ret = vmw_thp_insert_aligned(bo, mm, node, align_pages,
                                                     place, mem, lpfn, mode);
                        if (!ret)
                                goto found_unlock;
@@ -84,14 +85,14 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 
        align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
        if (mem->num_pages >= align_pages) {
-               ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem,
-                                            lpfn, mode);
+               ret = vmw_thp_insert_aligned(bo, mm, node, align_pages, place,
+                                            mem, lpfn, mode);
                if (!ret)
                        goto found_unlock;
        }
 
        ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
-                                         mem->page_alignment, 0,
+                                         bo->page_alignment, 0,
                                          place->fpfn, lpfn, mode);
 found_unlock:
        spin_unlock(&rman->lock);
index e88da481a976a67cb1b8f0c7c7536ed04baaa007..0d727a091e23bf60e1aa21d66c4171a0a71fe6d2 100644 (file)
@@ -86,6 +86,7 @@ struct ttm_tt;
  * @base: drm_gem_object superclass data.
  * @bdev: Pointer to the buffer object device structure.
  * @type: The bo type.
+ * @page_alignment: Page alignment.
  * @destroy: Destruction function. If NULL, kfree is used.
  * @num_pages: Actual number of pages.
  * @kref: Reference count of this buffer object. When this refcount reaches
@@ -123,6 +124,7 @@ struct ttm_buffer_object {
 
        struct ttm_device *bdev;
        enum ttm_bo_type type;
+       uint32_t page_alignment;
        void (*destroy) (struct ttm_buffer_object *);
 
        /**
index 6164ccf4f30836fff92c797bb8e733064a41cf14..890b9d3695193c329a8dda908c5639494e37bec0 100644 (file)
@@ -161,7 +161,6 @@ struct ttm_bus_placement {
  * @mm_node: Memory manager node.
  * @size: Requested size of memory region.
  * @num_pages: Actual size of memory region in pages.
- * @page_alignment: Page alignment.
  * @placement: Placement flags.
  * @bus: Placement on io bus accessible to the CPU
  *
@@ -172,7 +171,6 @@ struct ttm_resource {
        void *mm_node;
        unsigned long start;
        unsigned long num_pages;
-       uint32_t page_alignment;
        uint32_t mem_type;
        uint32_t placement;
        struct ttm_bus_placement bus;