return bo->flags & XE_BO_CREATE_USER_BIT;
}
-static struct xe_tile *
-mem_type_to_tile(struct xe_device *xe, u32 mem_type)
+static struct xe_migrate *
+mem_type_to_migrate(struct xe_device *xe, u32 mem_type)
{
+ struct xe_tile *tile;
+
xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type));
- return &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
+ tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
+ return tile->migrate;
+}
+
+static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res)
+{
+ struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
+ struct ttm_resource_manager *mgr;
+
+ xe_assert(xe, resource_is_vram(res));
+ mgr = ttm_manager_type(&xe->ttm, res->mem_type);
+ return to_xe_ttm_vram_mgr(mgr)->vram;
}
@@ ... @@ static void try_add_system(struct xe_bo *bo, struct ttm_place *places,
static void add_vram(struct xe_device *xe, struct xe_bo *bo,
struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
{
- struct xe_tile *tile = mem_type_to_tile(xe, mem_type);
struct ttm_place place = { .mem_type = mem_type };
- u64 io_size = tile->mem.vram.io_size;
+ struct xe_mem_region *vram;
+ u64 io_size;
- xe_assert(xe, tile->mem.vram.usable_size);
+ vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram;
+ xe_assert(xe, vram && vram->usable_size);
+ io_size = vram->io_size;
/*
* For eviction / restore on suspend / resume objects
* pinned in VRAM must be contiguous
*/
if (bo_flags & (XE_BO_CREATE_PINNED_BIT |
		XE_BO_CREATE_GGTT_BIT))
place.flags |= TTM_PL_FLAG_CONTIGUOUS;
- if (io_size < tile->mem.vram.usable_size) {
+ if (io_size < vram->usable_size) {
if (bo_flags & XE_BO_NEEDS_CPU_ACCESS) {
place.fpfn = 0;
place.lpfn = io_size >> PAGE_SHIFT;
@@ ... @@
return 0;
case XE_PL_VRAM0:
case XE_PL_VRAM1: {
- struct xe_tile *tile = mem_type_to_tile(xe, mem->mem_type);
struct xe_ttm_vram_mgr_resource *vres =
to_xe_ttm_vram_mgr_resource(mem);
+ struct xe_mem_region *vram = res_to_mem_region(mem);
if (vres->used_visible_size < mem->size)
return -EINVAL;
mem->bus.offset = mem->start << PAGE_SHIFT;
- if (tile->mem.vram.mapping &&
+ if (vram->mapping &&
mem->placement & TTM_PL_FLAG_CONTIGUOUS)
- mem->bus.addr = (u8 *)tile->mem.vram.mapping +
+ mem->bus.addr = (u8 *)vram->mapping +
mem->bus.offset;
- mem->bus.offset += tile->mem.vram.io_start;
+ mem->bus.offset += vram->io_start;
mem->bus.is_iomem = true;
#if !defined(CONFIG_X86)
@@ ... @@
struct ttm_resource *old_mem = ttm_bo->resource;
u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
struct ttm_tt *ttm = ttm_bo->ttm;
- struct xe_tile *tile = NULL;
+ struct xe_migrate *migrate = NULL;
struct dma_fence *fence;
bool move_lacks_source;
bool tt_has_data;
@@ ... @@
}
if (bo->tile)
- tile = bo->tile;
+ migrate = bo->tile->migrate;
else if (resource_is_vram(new_mem))
- tile = mem_type_to_tile(xe, new_mem->mem_type);
+ migrate = mem_type_to_migrate(xe, new_mem->mem_type);
else if (mem_type_is_vram(old_mem_type))
- tile = mem_type_to_tile(xe, old_mem_type);
+ migrate = mem_type_to_migrate(xe, old_mem_type);
- xe_assert(xe, tile);
- xe_tile_assert(tile, tile->migrate);
+ xe_assert(xe, migrate);
trace_xe_bo_move(bo);
xe_device_mem_access_get(xe);
@@ ... @@
/* Create a new VMAP once kernel BO back in VRAM */
if (!ret && resource_is_vram(new_mem)) {
- void *new_addr = tile->mem.vram.mapping +
+ struct xe_mem_region *vram = res_to_mem_region(new_mem);
+ void *new_addr = vram->mapping +
(new_mem->start << PAGE_SHIFT);
if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
@@ ... @@
}
} else {
if (move_lacks_source)
- fence = xe_migrate_clear(tile->migrate, bo, new_mem);
+ fence = xe_migrate_clear(migrate, bo, new_mem);
else
- fence = xe_migrate_copy(tile->migrate,
+ fence = xe_migrate_copy(migrate,
bo, bo, old_mem, new_mem);
if (IS_ERR(fence)) {
ret = PTR_ERR(fence);
@@ ... @@
static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
unsigned long page_offset)
{
- struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
- struct xe_tile *tile = mem_type_to_tile(xe, ttm_bo->resource->mem_type);
struct xe_res_cursor cursor;
+ struct xe_mem_region *vram;
if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;
+ vram = res_to_mem_region(ttm_bo->resource);
xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
- return (tile->mem.vram.io_start + cursor.start) >> PAGE_SHIFT;
+ return (vram->io_start + cursor.start) >> PAGE_SHIFT;
}
@@ ... @@
static void __xe_bo_vunmap(struct xe_bo *bo);
uint64_t vram_region_gpu_offset(struct ttm_resource *res)
{
struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
- struct xe_tile *tile = mem_type_to_tile(xe, res->mem_type);
if (res->mem_type == XE_PL_STOLEN)
return xe_ttm_stolen_gpu_offset(xe);
- return tile->mem.vram.dpa_base;
+ return res_to_mem_region(res)->dpa_base;
}
/**