Add an io_mem_pfn callback to ttm_bo_driver so that drivers can handle io_mem mappings on their own. ttm_bo_default_io_mem_pfn provides the previous linear calculation as the default implementation.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
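
As an illustration only (not part of this change), a driver that wants to resolve io_mem PFNs itself instead of relying on the linear default could implement the new callback roughly as sketched below. struct my_bo, its io_pfns table and my_driver_io_mem_pfn are hypothetical names used purely for the example:

    #include <linux/kernel.h>
    #include <drm/ttm/ttm_bo_api.h>
    #include <drm/ttm/ttm_bo_driver.h>

    /* Hypothetical driver BO embedding a TTM BO plus a private PFN table. */
    struct my_bo {
    	struct ttm_buffer_object tbo;
    	unsigned long *io_pfns;	/* one PFN per page of the BO */
    };

    /*
     * Hypothetical io_mem_pfn implementation: instead of assuming a linear
     * BAR mapping (what ttm_bo_default_io_mem_pfn does), look the PFN up
     * in a driver-private table, e.g. for VRAM that is not mapped
     * contiguously into the aperture.
     */
    static unsigned long my_driver_io_mem_pfn(struct ttm_buffer_object *bo,
    					      unsigned long page_offset)
    {
    	struct my_bo *mbo = container_of(bo, struct my_bo, tbo);

    	return mbo->io_pfns[page_offset];
    }

    static struct ttm_bo_driver my_bo_driver = {
    	/* ... other callbacks ... */
    	.io_mem_pfn = my_driver_io_mem_pfn,
    };
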
.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_free = &amdgpu_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int amdgpu_ttm_init(struct amdgpu_device *adev)
.verify_access = ast_bo_verify_access,
.io_mem_reserve = &ast_ttm_io_mem_reserve,
.io_mem_free = &ast_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int ast_mm_init(struct ast_private *ast)
.verify_access = bochs_bo_verify_access,
.io_mem_reserve = &bochs_ttm_io_mem_reserve,
.io_mem_free = &bochs_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int bochs_mm_init(struct bochs_device *bochs)
.verify_access = cirrus_bo_verify_access,
.io_mem_reserve = &cirrus_ttm_io_mem_reserve,
.io_mem_free = &cirrus_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int cirrus_mm_init(struct cirrus_device *cirrus)
.verify_access = mgag200_bo_verify_access,
.io_mem_reserve = &mgag200_ttm_io_mem_reserve,
.io_mem_free = &mgag200_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int mgag200_mm_init(struct mga_device *mdev)
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
struct nvkm_vma *
.verify_access = &qxl_verify_access,
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
.io_mem_free = &qxl_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
.move_notify = &qxl_bo_move_notify,
};
.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
.io_mem_free = &radeon_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int radeon_ttm_init(struct radeon_device *rdev)
*/
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
if (bo->mem.bus.is_iomem)
- pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
+ pfn = bdev->driver->io_mem_pfn(bo, page_offset);
else {
page = ttm->pages[page_offset];
if (unlikely(!page && i == 0)) {
return bo;
}
+unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
+ unsigned long page_offset)
+{
+ return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
+ + page_offset;
+}
+EXPORT_SYMBOL(ttm_bo_default_io_mem_pfn);
+
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev)
{
.verify_access = &virtio_gpu_verify_access,
.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
.io_mem_free = &virtio_gpu_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
.move_notify = &virtio_gpu_bo_move_notify,
.swap_notify = &virtio_gpu_bo_swap_notify,
};
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
.io_mem_free = &vmw_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
struct ttm_buffer_object *bo);
+/**
+ * ttm_bo_default_io_mem_pfn - get a pfn for a page offset
+ *
+ * @bo: the BO we need to look up the pfn for
+ * @page_offset: offset inside the BO to look up.
+ *
+ * Calculate the PFN for iomem based mappings during a page fault.
+ */
+unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
+ unsigned long page_offset);
+
/**
* ttm_bo_mmap - mmap out of the ttm device address space.
*
struct ttm_mem_reg *mem);
void (*io_mem_free)(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem);
+
+ /**
+ * Return the pfn for a given page_offset inside the BO.
+ *
+ * @bo: the BO to look up the pfn for
+ * @page_offset: the offset to look up
+ */
+ unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
+ unsigned long page_offset);
};
/**