drm/vmwgfx: Make use of PFN_ALIGN/PFN_UP helper macro
author    Cai Huoqing <caihuoqing@baidu.com>
Mon, 2 Aug 2021 03:33:08 +0000 (11:33 +0800)
committer Zack Rusin <zackr@vmware.com>
Mon, 9 Aug 2021 21:30:34 +0000 (17:30 -0400)
It's a refactor to make use of the PFN_ALIGN/PFN_UP helper macros.

Signed-off-by: Cai Huoqing <caihuoqing@baidu.com>
Signed-off-by: Zack Rusin <zackr@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210802033308.927-1-caihuoqing@baidu.com
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
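
For context: both helpers come from include/linux/pfn.h, where (around the v5.14 era of this commit) they are defined roughly as quoted below. PFN_UP(size) is therefore equivalent to the open-coded PAGE_ALIGN(size) >> PAGE_SHIFT, and PFN_ALIGN(size) to (size + PAGE_SIZE - 1) & PAGE_MASK, so the patch is a pure cleanup:

    #define PFN_ALIGN(x)  (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
    #define PFN_UP(x)     (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)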

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 362f56d5b12ba601023a3255fd3f4645d0446893..9e3e1429db948337110a099ed48f1b676a4754eb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -405,7 +405,7 @@ static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
                              bool user)
 {
        static size_t struct_size, user_struct_size;
-       size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       size_t num_pages = PFN_UP(size);
        size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
 
        if (unlikely(struct_size == 0)) {
@@ -474,7 +474,6 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
                         struct ttm_placement *placement,
                         struct ttm_buffer_object **p_bo)
 {
-       unsigned npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct ttm_operation_ctx ctx = { false, false };
        struct ttm_buffer_object *bo;
        size_t acc_size;
@@ -485,7 +484,7 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
                return -ENOMEM;
 
        acc_size = ttm_round_pot(sizeof(*bo));
-       acc_size += ttm_round_pot(npages * sizeof(void *));
+       acc_size += ttm_round_pot(PFN_UP(size) * sizeof(void *));
        acc_size += ttm_round_pot(sizeof(struct ttm_tt));
 
        ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 4579612a782db7306e59464c2bdc614b80276fc1..3c06df2a547438263499ed10382a90fc0d459b84 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -801,7 +801,7 @@ static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 {
        struct vmw_cmdbuf_alloc_info info;
 
-       info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       info.page_size = PFN_UP(size);
        info.node = node;
        info.done = false;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index c84a16c1def0d1b2bb38f83d45c56e77fa9ef3be..17a98db0001795aad25b64ee842f4ce0fd9e86bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -607,8 +607,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
        if (num_entries < co_info[type].min_initial_entries) {
                vcotbl->res.backup_size = co_info[type].min_initial_entries *
                        co_info[type].size;
-               vcotbl->res.backup_size =
-                       (vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+               vcotbl->res.backup_size = PFN_ALIGN(vcotbl->res.backup_size);
        }
 
        vcotbl->scrubbed = true;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 338c6e2613ea7780b19a291dce6ef8653379ba7f..74fa4190921385176cd07831da5b0448a4872b2f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -100,7 +100,7 @@ static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
        int ret;
 
        kmap_offset = 0;
-       kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       kmap_num = PFN_UP(width*height*4);
 
        ret = ttm_bo_reserve(&bo->base, true, false, NULL);
        if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index c8e578f63c9ce43fa695b3bae3ceb8a7fffe864d..f9394207dd3cc082bf05de4c56ae046a1ab5d31d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -256,8 +256,7 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
                if (!otables[i].enabled)
                        continue;
 
-               otables[i].size =
-                       (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
+               otables[i].size = PFN_ALIGN(otables[i].size);
                bo_size += otables[i].size;
        }
 
@@ -385,7 +384,7 @@ static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
        while (likely(data_size > PAGE_SIZE)) {
                data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
                data_size *= VMW_PPN_SIZE;
-               tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
+               tot_size += PFN_ALIGN(data_size);
        }
 
        return tot_size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 3b6f6044c3259ce5bdcd12493ee918ade376948e..8d1e869cc1964784bf1bc6ba89e7a40bbcafd845 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -353,8 +353,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
 {
-       unsigned long size =
-               (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+       unsigned long size = PFN_ALIGN(res->backup_size);
        struct vmw_buffer_object *backup;
        int ret;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index b3c8d2da6f1a0df45739ef031cab02f9a427bde0..b8dd62529104bad7dc0762043a57359e83f35c80 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -981,8 +981,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
                goto no_reserve;
 
        /* Map and copy shader bytecode. */
-       ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
-                         &map);
+       ret = ttm_bo_kmap(&buf->base, 0, PFN_UP(size), &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(&buf->base);
                goto no_reserve;
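
As a sanity check, here is a minimal userspace sketch (not part of the patch) demonstrating the equivalences above, assuming 4 KiB pages; the PAGE_*/PFN_* definitions are local stand-ins for the kernel's asm/page.h and linux/pfn.h:

    #include <assert.h>

    /* Local stand-ins for the kernel definitions, assuming 4 KiB pages. */
    #define PAGE_SHIFT    12
    #define PAGE_SIZE     (1UL << PAGE_SHIFT)
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
    #define PFN_UP(x)     (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    #define PFN_ALIGN(x)  (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)

    int main(void)
    {
            for (unsigned long size = 0; size < 3 * PAGE_SIZE; size += 511) {
                    /* PFN_UP(size) matches the open-coded PAGE_ALIGN(size) >> PAGE_SHIFT */
                    assert(PFN_UP(size) == (PAGE_ALIGN(size) >> PAGE_SHIFT));
                    /* PFN_ALIGN(size) matches (size + PAGE_SIZE - 1) & PAGE_MASK */
                    assert(PFN_ALIGN(size) == ((size + PAGE_SIZE - 1) & PAGE_MASK));
            }
            return 0;
    }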