bool user)
{
static size_t struct_size, user_struct_size;
- size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ size_t num_pages = PFN_UP(size);
size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
if (unlikely(struct_size == 0)) {
struct ttm_placement *placement,
struct ttm_buffer_object **p_bo)
{
- unsigned npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct ttm_operation_ctx ctx = { false, false };
struct ttm_buffer_object *bo;
size_t acc_size;
return -ENOMEM;
acc_size = ttm_round_pot(sizeof(*bo));
- acc_size += ttm_round_pot(npages * sizeof(void *));
+ acc_size += ttm_round_pot(PFN_UP(size) * sizeof(void *));
acc_size += ttm_round_pot(sizeof(struct ttm_tt));
ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
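
Both hunks above rely on PFN_UP() from include/linux/pfn.h being a drop-in replacement for PAGE_ALIGN(size) >> PAGE_SHIFT, and the later hunks lean on PFN_ALIGN() the same way for the masked round-up. A small userspace sanity check of both identities; the PAGE_* stubs and main() are illustration only, while the two PFN_* macro bodies mirror include/linux/pfn.h:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12			/* stubbed: 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* As in include/linux/pfn.h */
#define PFN_ALIGN(x)	(((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	unsigned long size;

	for (size = 0; size <= 8 * PAGE_SIZE; size++) {
		/* page count, rounded up */
		assert(PFN_UP(size) == (PAGE_ALIGN(size) >> PAGE_SHIFT));
		/* byte size, rounded up to a page boundary */
		assert(PFN_ALIGN(size) == ((size + PAGE_SIZE - 1) & PAGE_MASK));
	}
	printf("PFN_UP and PFN_ALIGN match the open-coded forms\n");
	return 0;
}
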
{
struct vmw_cmdbuf_alloc_info info;
- info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ info.page_size = PFN_UP(size);
info.node = node;
info.done = false;
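
For context on why PFN_UP() (a page count) rather than PFN_ALIGN() (a byte size) is the right conversion here: despite its name, page_size holds the allocation size in pages. A sketch of the struct implied by the assignments above; the field types are assumptions read off this hunk, not a verbatim copy of vmwgfx_cmdbuf.c:

#include <stdbool.h>
#include <stddef.h>

struct drm_mm_node;	/* range-manager node, declared elsewhere */

struct vmw_cmdbuf_alloc_info {
	size_t page_size;		/* allocation size in pages, not bytes */
	struct drm_mm_node *node;	/* backing range-manager node */
	bool done;			/* set once the allocation completes */
};
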
if (num_entries < co_info[type].min_initial_entries) {
vcotbl->res.backup_size = co_info[type].min_initial_entries *
co_info[type].size;
- vcotbl->res.backup_size =
- (vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+ vcotbl->res.backup_size = PFN_ALIGN(vcotbl->res.backup_size);
}
vcotbl->scrubbed = true;
int ret;
kmap_offset = 0;
- kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ kmap_num = PFN_UP(width * height * 4);
ret = ttm_bo_reserve(&bo->base, true, false, NULL);
if (unlikely(ret != 0)) {
if (!otables[i].enabled)
continue;
- otables[i].size =
- (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
+ otables[i].size = PFN_ALIGN(otables[i].size);
bo_size += otables[i].size;
}
while (likely(data_size > PAGE_SIZE)) {
data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
data_size *= VMW_PPN_SIZE;
- tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
+ tot_size += PFN_ALIGN(data_size);
}
return tot_size >> PAGE_SHIFT;
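
The loop above sizes the multi-level MOB page table: each pass computes how many VMW_PPN_SIZE entries are needed to map the previous level and page-aligns that level, until a level fits in a single page. A standalone sketch of that arithmetic with stubbed constants; PAGE_SHIFT = 12 and VMW_PPN_SIZE = 8 are assumptions for illustration only:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PFN_ALIGN(x)	(((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define VMW_PPN_SIZE	8	/* assumed: 64-bit page numbers */

static unsigned long pt_pages(unsigned long data_size)
{
	unsigned long tot_size = 0;

	while (data_size > PAGE_SIZE) {
		/* one PPN entry per page of the level below */
		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
		data_size *= VMW_PPN_SIZE;
		/* each level occupies whole pages */
		tot_size += PFN_ALIGN(data_size);
	}
	return tot_size >> PAGE_SHIFT;
}

int main(void)
{
	/* 16 MiB object: 4096 PPNs -> 8 PT pages, then 8 PPNs -> 1 page */
	printf("%lu\n", pt_pages(16UL << 20));	/* prints 9 */
	return 0;
}
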
static int vmw_resource_buf_alloc(struct vmw_resource *res,
bool interruptible)
{
- unsigned long size =
- (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+ unsigned long size = PFN_ALIGN(res->backup_size);
struct vmw_buffer_object *backup;
int ret;
goto no_reserve;
/* Map and copy shader bytecode. */
- ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
- &map);
+ ret = ttm_bo_kmap(&buf->base, 0, PFN_UP(size), &map);
if (unlikely(ret != 0)) {
ttm_bo_unreserve(&buf->base);
goto no_reserve;