struct list_head *head;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *vm = &dev_priv->gtt.base;
+ struct i915_address_space *vm = &dev_priv->ggtt.base;
struct i915_vma *vma;
u64 total_obj_size, total_gtt_size;
int count, ret;
u32 count, mappable_count, purgeable_count;
u64 size, mappable_size, purgeable_size;
struct drm_i915_gem_object *obj;
- struct i915_address_space *vm = &dev_priv->gtt.base;
+ struct i915_address_space *vm = &dev_priv->ggtt.base;
struct drm_file *file;
struct i915_vma *vma;
int ret;
count, size);
seq_printf(m, "%llu [%llu] gtt total\n",
- dev_priv->gtt.base.total,
- (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
+ dev_priv->ggtt.base.total,
+ (u64)dev_priv->ggtt.mappable_end - dev_priv->ggtt.base.start);
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
if (!ap)
return -ENOMEM;
- ap->ranges[0].base = dev_priv->gtt.mappable_base;
- ap->ranges[0].size = dev_priv->gtt.mappable_end;
+ ap->ranges[0].base = dev_priv->ggtt.mappable_base;
+ ap->ranges[0].size = dev_priv->ggtt.mappable_end;
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
- aperture_size = dev_priv->gtt.mappable_end;
+ aperture_size = dev_priv->ggtt.mappable_end;
- dev_priv->gtt.mappable =
- io_mapping_create_wc(dev_priv->gtt.mappable_base,
+ dev_priv->ggtt.mappable =
+ io_mapping_create_wc(dev_priv->ggtt.mappable_base,
aperture_size);
- if (dev_priv->gtt.mappable == NULL) {
+ if (dev_priv->ggtt.mappable == NULL) {
ret = -EIO;
goto out_gtt;
}
- dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+ dev_priv->ggtt.mtrr = arch_phys_wc_add(dev_priv->ggtt.mappable_base,
aperture_size);
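/*
 * A minimal sketch of the write-combined aperture setup in the hunk
 * above, assuming only the kernel's io-mapping and PAT/MTRR helpers;
 * the example_* names are illustrative, not the driver's.
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-mapping.h>

struct example_aperture {
	struct io_mapping *mappable;	/* CPU window into the GGTT aperture */
	int mtrr;			/* handle from arch_phys_wc_add(), or negative */
};

static int example_aperture_init(struct example_aperture *ap,
				 resource_size_t base, unsigned long size)
{
	/* WC mapping of the PCI aperture; pages are mapped on demand. */
	ap->mappable = io_mapping_create_wc(base, size);
	if (ap->mappable == NULL)
		return -EIO;

	/* Best effort: on PAT systems this is a cheap no-op. */
	ap->mtrr = arch_phys_wc_add(base, size);
	return 0;
}

static void example_aperture_fini(struct example_aperture *ap)
{
	/* Tear down in reverse order, mirroring the unload hunk below. */
	arch_phys_wc_del(ap->mtrr);
	io_mapping_free(ap->mappable);
}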
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
pci_disable_msi(dev->pdev);
pm_qos_remove_request(&dev_priv->pm_qos);
- arch_phys_wc_del(dev_priv->gtt.mtrr);
- io_mapping_free(dev_priv->gtt.mappable);
+ arch_phys_wc_del(dev_priv->ggtt.mtrr);
+ io_mapping_free(dev_priv->ggtt.mappable);
i915_global_gtt_cleanup(dev);
}
struct drm_atomic_state *modeset_restore_state;
struct list_head vm_list; /* Global list of all address spaces */
- struct i915_gtt gtt; /* VM representing the global address space */
+ struct i915_ggtt ggtt; /* VM representing the global address space */
struct i915_gem_mm mm;
DECLARE_HASHTABLE(mm_structs, 7);
/* Some GGTT VM helpers */
#define i915_obj_to_ggtt(obj) \
- (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
+ (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data;
- struct i915_gtt *ggtt = &dev_priv->gtt;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
size_t pinned;
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
- args->aper_size = dev_priv->gtt.base.total;
+ args->aper_size = dev_priv->ggtt.base.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
* source page isn't available. Return the error and we'll
* retry in the slow path.
*/
- if (fast_user_write(dev_priv->gtt.mappable, page_base,
+ if (fast_user_write(dev_priv->ggtt.mappable, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
goto out_flush;
}
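/*
 * A sketch of what a fast_user_write()-style helper does here,
 * assuming the io-mapping atomic kmap API of this era: copy from user
 * space into the WC aperture without sleeping, and report failure so
 * the caller can retry via a slow path that may fault pages in.
 * Illustrative, not the driver's exact helper.
 */
#include <linux/io-mapping.h>
#include <linux/uaccess.h>

static inline int example_fast_user_write(struct io_mapping *mapping,
					  loff_t page_base, int page_offset,
					  char __user *user_data, int length)
{
	void __iomem *vaddr_atomic;
	unsigned long unwritten;

	/* Atomic context: no page faults may be serviced while mapped. */
	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(
			(void __force *)vaddr_atomic + page_offset,
			user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);

	/* Non-zero: the source page was not resident; take the slow path. */
	return unwritten ? -EFAULT : 0;
}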
/* Use a partial view if the object is bigger than the aperture. */
- if (obj->base.size >= dev_priv->gtt.mappable_end &&
+ if (obj->base.size >= dev_priv->ggtt.mappable_end &&
obj->tiling_mode == I915_TILING_NONE) {
static const unsigned int chunk_size = 256; // 1 MiB
goto unpin;
/* Finally, remap it using the new GTT offset */
- pfn = dev_priv->gtt.mappable_base +
+ pfn = dev_priv->ggtt.mappable_base +
i915_gem_obj_ggtt_offset_view(obj, &view);
pfn >>= PAGE_SHIFT;
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
end = vm->total;
if (flags & PIN_MAPPABLE)
- end = min_t(u64, end, dev_priv->gtt.mappable_end);
+ end = min_t(u64, end, dev_priv->ggtt.mappable_end);
if (flags & PIN_ZONE_4G)
end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
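/*
 * The placement-window clamping above, as a worked sketch: start from
 * the whole address space and narrow it per pin flag. PIN_MAPPABLE and
 * PIN_ZONE_4G are the driver's pin flags; the numbers in the comments
 * are only examples.
 */
static u64 example_pin_end(u64 vm_total, u64 mappable_end, u64 flags)
{
	u64 end = vm_total;			/* e.g. a 4 GiB GGTT */

	if (flags & PIN_MAPPABLE)		/* CPU-visible aperture only */
		end = min_t(u64, end, mappable_end);	/* e.g. 256 MiB */
	if (flags & PIN_ZONE_4G)		/* stay below the 4 GiB line */
		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);

	/* Both clamps may apply; the stricter one wins via min_t(). */
	return end;
}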
vma = i915_gem_obj_to_ggtt(obj);
if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
list_move_tail(&vma->vm_link,
- &to_i915(obj->base.dev)->gtt.base.inactive_list);
+ &to_i915(obj->base.dev)->ggtt.base.inactive_list);
return 0;
}
(vma->node.start & (fence_alignment - 1)) == 0);
mappable = (vma->node.start + fence_size <=
- to_i915(obj->base.dev)->gtt.mappable_end);
+ to_i915(obj->base.dev)->ggtt.mappable_end);
obj->map_and_fenceable = mappable && fenceable;
}
else if (to_i915(dev)->mm.aliasing_ppgtt)
args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
else
- args->value = to_i915(dev)->gtt.base.total;
+ args->value = to_i915(dev)->ggtt.base.total;
break;
default:
ret = -EINVAL;
/* Map the page containing the relocation we're going to perform. */
offset = i915_gem_obj_ggtt_offset(obj);
offset += reloc->offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
offset & PAGE_MASK);
iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
if (offset_in_page(offset) == 0) {
io_mapping_unmap_atomic(reloc_page);
reloc_page =
- io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
offset);
}
if (ctx->ppgtt)
vm = &ctx->ppgtt->base;
else
- vm = &dev_priv->gtt.base;
+ vm = &dev_priv->ggtt.base;
memset(&params_master, 0x00, sizeof(params_master));
/* Make sure write is complete before other code can use this page
* table. Also require for WC mapped PTEs */
- readl(dev_priv->gtt.gsm);
+ readl(dev_priv->ggtt.gsm);
}
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
/* Make sure write is complete before other code can use this page
* table. Also require for WC mapped PTEs */
- readl(dev_priv->gtt.gsm);
+ readl(dev_priv->ggtt.gsm);
mark_tlbs_dirty(ppgtt);
return 0;
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
- BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+ BUG_ON(!drm_mm_initialized(&dev_priv->ggtt.base.mm));
ret = gen6_init_scratch(vm);
if (ret)
return ret;
alloc:
- ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
&ppgtt->node, GEN6_PD_SIZE,
GEN6_PD_ALIGN, 0,
- 0, dev_priv->gtt.base.total,
+ 0, dev_priv->ggtt.base.total,
DRM_MM_TOPDOWN);
if (ret == -ENOSPC && !retried) {
- ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
+ ret = i915_gem_evict_something(dev, &dev_priv->ggtt.base,
GEN6_PD_SIZE, GEN6_PD_ALIGN,
I915_CACHE_NONE,
- 0, dev_priv->gtt.base.total,
+ 0, dev_priv->ggtt.base.total,
0);
if (ret)
goto err_out;
goto err_out;
- if (ppgtt->node.start < dev_priv->gtt.mappable_end)
+ if (ppgtt->node.start < dev_priv->ggtt.mappable_end)
DRM_DEBUG("Forced to use aperture for PDEs\n");
return 0;
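/*
 * The allocate-evict-retry idiom used above, sketched in isolation:
 * try a top-down drm_mm allocation, and on -ENOSPC evict from the same
 * range exactly once before giving up. example_alloc_topdown() is
 * illustrative; the drm_mm and evict calls mirror the hunk.
 */
static int example_alloc_topdown(struct drm_device *dev,
				 struct i915_address_space *vm,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment)
{
	bool retried = false;
	int ret;

alloc:
	ret = drm_mm_insert_node_in_range_generic(&vm->mm, node,
						  size, alignment, 0,
						  0, vm->total,
						  DRM_MM_TOPDOWN);
	if (ret == -ENOSPC && !retried) {
		/* Make room, then retry the identical allocation once. */
		ret = i915_gem_evict_something(dev, vm, size, alignment,
					       I915_CACHE_NONE,
					       0, vm->total, 0);
		if (ret == 0) {
			retried = true;
			goto alloc;
		}
	}
	return ret;
}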
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
+ ppgtt->base.pte_encode = dev_priv->ggtt.base.pte_encode;
if (IS_GEN6(dev)) {
ppgtt->switch_mm = gen6_mm_switch;
} else if (IS_HASWELL(dev)) {
ppgtt->pd.base.ggtt_offset =
ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
- ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
+ ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
{
bool ret = dev_priv->mm.interruptible;
- if (unlikely(dev_priv->gtt.do_idle_maps)) {
+ if (unlikely(dev_priv->ggtt.do_idle_maps)) {
dev_priv->mm.interruptible = false;
if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
- if (unlikely(dev_priv->gtt.do_idle_maps))
+ if (unlikely(dev_priv->ggtt.do_idle_maps))
dev_priv->mm.interruptible = interruptible;
}
i915_check_and_clear_faults(dev);
- dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- dev_priv->gtt.base.start,
- dev_priv->gtt.base.total,
+ dev_priv->ggtt.base.clear_range(&dev_priv->ggtt.base,
+ dev_priv->ggtt.base.start,
+ dev_priv->ggtt.base.total,
true);
i915_ggtt_flush(dev_priv);
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
gen8_pte_t __iomem *gtt_entries =
- (gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0; /* shut up gcc */
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
gen6_pte_t __iomem *gtt_entries =
- (gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ (gen6_pte_t __iomem *)dev_priv->ggtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen8_pte_t scratch_pte, __iomem *gtt_base =
- (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
- const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+ (gen8_pte_t __iomem *) dev_priv->ggtt.gsm + first_entry;
+ const int max_entries = gtt_total_entries(dev_priv->ggtt) - first_entry;
int i;
int rpm_atomic_seq;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen6_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
- const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+ (gen6_pte_t __iomem *) dev_priv->ggtt.gsm + first_entry;
+ const int max_entries = gtt_total_entries(dev_priv->ggtt) - first_entry;
int i;
int rpm_atomic_seq;
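/*
 * A sketch of the clear_range pattern these declarations feed into:
 * point every PTE in the span at the scratch page through the WC gsm
 * mapping, then do a posting read so the writes land before the range
 * is reused. vm->pte_encode stands in for the per-generation encoder;
 * the example_* name is illustrative.
 */
static void example_ggtt_clear_range(struct i915_address_space *vm,
				     gen6_pte_t __iomem *gsm, int gsm_entries,
				     u64 start, u64 length)
{
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen6_pte_t __iomem *gtt_base = gsm + first_entry;
	gen6_pte_t scratch_pte;
	int i;

	/* Never scribble past the end of the PTE table. */
	if (WARN_ON(num_entries > gsm_entries - first_entry))
		num_entries = gsm_entries - first_entry;

	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
				     I915_CACHE_LLC, true, 0);
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);	/* posting read flushes the WC writes */
}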
* of the aperture.
*/
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+ struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
true);
dev_priv->mm.aliasing_ppgtt = ppgtt;
- WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma);
- dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma;
+ WARN_ON(dev_priv->ggtt.base.bind_vma != ggtt_bind_vma);
+ dev_priv->ggtt.base.bind_vma = aliasing_gtt_bind_vma;
}
return 0;
struct drm_i915_private *dev_priv = dev->dev_private;
u64 gtt_size, mappable_size;
- gtt_size = dev_priv->gtt.base.total;
- mappable_size = dev_priv->gtt.mappable_end;
+ gtt_size = dev_priv->ggtt.base.total;
+ mappable_size = dev_priv->ggtt.mappable_end;
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
void i915_global_gtt_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *vm = &dev_priv->gtt.base;
+ struct i915_address_space *vm = &dev_priv->ggtt.base;
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
* readback check when writing GTT PTE entries.
*/
if (IS_BROXTON(dev))
- dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
+ dev_priv->ggtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
else
- dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
- if (!dev_priv->gtt.gsm) {
+ dev_priv->ggtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
+ if (!dev_priv->ggtt.gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
return -ENOMEM;
}
if (IS_ERR(scratch_page)) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
- iounmap(dev_priv->gtt.gsm);
+ iounmap(dev_priv->ggtt.gsm);
return PTR_ERR(scratch_page);
}
- dev_priv->gtt.base.scratch_page = scratch_page;
+ dev_priv->ggtt.base.scratch_page = scratch_page;
return 0;
}
ret = ggtt_probe_common(dev, gtt_size);
- dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
- dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
- dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
- dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+ dev_priv->ggtt.base.clear_range = gen8_ggtt_clear_range;
+ dev_priv->ggtt.base.insert_entries = gen8_ggtt_insert_entries;
+ dev_priv->ggtt.base.bind_vma = ggtt_bind_vma;
+ dev_priv->ggtt.base.unbind_vma = ggtt_unbind_vma;
if (IS_CHERRYVIEW(dev_priv))
- dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;
+ dev_priv->ggtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;
return ret;
}
ret = ggtt_probe_common(dev, gtt_size);
- dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
- dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
- dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
- dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+ dev_priv->ggtt.base.clear_range = gen6_ggtt_clear_range;
+ dev_priv->ggtt.base.insert_entries = gen6_ggtt_insert_entries;
+ dev_priv->ggtt.base.bind_vma = ggtt_bind_vma;
+ dev_priv->ggtt.base.unbind_vma = ggtt_unbind_vma;
return ret;
}
static void gen6_gmch_remove(struct i915_address_space *vm)
{
- struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
+ struct i915_ggtt *ggtt = container_of(vm, struct i915_ggtt, base);

- iounmap(gtt->gsm);
+ iounmap(ggtt->gsm);
free_scratch_page(vm->dev, vm->scratch_page);
}
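/*
 * The container_of idiom the hunk above depends on, self-contained:
 * the GGTT embeds its address space as ->base, so a vm pointer known
 * to belong to the global GTT converts back to the containing struct
 * by subtracting the member offset. example_ggtt is illustrative.
 */
#include <linux/kernel.h>	/* container_of() */

struct example_ggtt {
	struct i915_address_space base;	/* must be the embedded member */
	void __iomem *gsm;
};

static struct example_ggtt *example_vm_to_ggtt(struct i915_address_space *vm)
{
	/* Only valid when vm really is &example_ggtt.base (vm->is_ggtt). */
	return container_of(vm, struct example_ggtt, base);
}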
intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
- dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
- dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
- dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
- dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
- dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+ dev_priv->ggtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
+ dev_priv->ggtt.base.insert_entries = i915_ggtt_insert_entries;
+ dev_priv->ggtt.base.clear_range = i915_ggtt_clear_range;
+ dev_priv->ggtt.base.bind_vma = ggtt_bind_vma;
+ dev_priv->ggtt.base.unbind_vma = ggtt_unbind_vma;
- if (unlikely(dev_priv->gtt.do_idle_maps))
+ if (unlikely(dev_priv->ggtt.do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
return 0;
int i915_gem_gtt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_gtt *gtt = &dev_priv->gtt;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
if (INTEL_INFO(dev)->gen <= 5) {
- gtt->gtt_probe = i915_gmch_probe;
- gtt->base.cleanup = i915_gmch_remove;
+ ggtt->probe = i915_gmch_probe;
+ ggtt->base.cleanup = i915_gmch_remove;
} else if (INTEL_INFO(dev)->gen < 8) {
- gtt->gtt_probe = gen6_gmch_probe;
- gtt->base.cleanup = gen6_gmch_remove;
+ ggtt->probe = gen6_gmch_probe;
+ ggtt->base.cleanup = gen6_gmch_remove;
if (IS_HASWELL(dev) && dev_priv->ellc_size)
- gtt->base.pte_encode = iris_pte_encode;
+ ggtt->base.pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev))
- gtt->base.pte_encode = hsw_pte_encode;
+ ggtt->base.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(dev))
- gtt->base.pte_encode = byt_pte_encode;
+ ggtt->base.pte_encode = byt_pte_encode;
else if (INTEL_INFO(dev)->gen >= 7)
- gtt->base.pte_encode = ivb_pte_encode;
+ ggtt->base.pte_encode = ivb_pte_encode;
else
- gtt->base.pte_encode = snb_pte_encode;
+ ggtt->base.pte_encode = snb_pte_encode;
} else {
- dev_priv->gtt.gtt_probe = gen8_gmch_probe;
- dev_priv->gtt.base.cleanup = gen6_gmch_remove;
+ ggtt->probe = gen8_gmch_probe;
+ ggtt->base.cleanup = gen6_gmch_remove;
}
- gtt->base.dev = dev;
- gtt->base.is_ggtt = true;
+ ggtt->base.dev = dev;
+ ggtt->base.is_ggtt = true;
- ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
- &gtt->mappable_base, &gtt->mappable_end);
+ ret = ggtt->probe(dev, &ggtt->base.total, &ggtt->stolen_size,
+ &ggtt->mappable_base, &ggtt->mappable_end);
if (ret)
return ret;
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %lluM\n",
- gtt->base.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
- DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
+ ggtt->base.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped)
DRM_INFO("VT-d active for gfx access\n");
return 0;
out_gtt_cleanup:
- gtt->base.cleanup(&dev_priv->gtt.base);
+ ggtt->base.cleanup(&dev_priv->ggtt.base);
return ret;
}
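/*
 * The generation dispatch in i915_gem_gtt_init() above, reduced to its
 * shape: select the probe and cleanup hooks once, then call through
 * the struct so nothing else branches on the hardware generation. The
 * example_* names are illustrative; the hooks are the ones the hunk
 * assigns.
 */
struct example_ggtt_ops {
	int (*probe)(struct drm_device *dev, u64 *gtt_total, size_t *stolen,
		     phys_addr_t *mappable_base, u64 *mappable_end);
	void (*cleanup)(struct i915_address_space *vm);
};

static void example_select_ggtt_ops(struct drm_device *dev,
				    struct example_ggtt_ops *ops)
{
	if (INTEL_INFO(dev)->gen <= 5) {
		ops->probe = i915_gmch_probe;
		ops->cleanup = i915_gmch_remove;
	} else if (INTEL_INFO(dev)->gen < 8) {
		ops->probe = gen6_gmch_probe;
		ops->cleanup = gen6_gmch_remove;
	} else {
		ops->probe = gen8_gmch_probe;
		ops->cleanup = gen6_gmch_remove; /* gen8 reuses the gen6 teardown */
	}
}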
i915_check_and_clear_faults(dev);
/* First fill our portion of the GTT with scratch pages */
- dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- dev_priv->gtt.base.start,
- dev_priv->gtt.base.total,
+ dev_priv->ggtt.base.clear_range(&dev_priv->ggtt.base,
+ dev_priv->ggtt.base.start,
+ dev_priv->ggtt.base.total,
true);
/* Cache flush objects bound into GGTT and rebind them. */
- vm = &dev_priv->gtt.base;
+ vm = &dev_priv->ggtt.base;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
flush = false;
list_for_each_entry(vma, &obj->vma_list, obj_link) {
* and correct (in cases like swizzling). That region is referred to as GMADR in
* the spec.
*/
-struct i915_gtt {
+struct i915_ggtt {
struct i915_address_space base;
size_t stolen_size; /* Total size of stolen memory */
int mtrr;
- /* global gtt ops */
- int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
- size_t *stolen, phys_addr_t *mappable_base,
- u64 *mappable_end);
+ int (*probe)(struct drm_device *dev, u64 *gtt_total,
+ size_t *stolen, phys_addr_t *mappable_base,
+ u64 *mappable_end);
};
struct i915_hw_ppgtt {
{
return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
alignment, 0,
- dev_priv->gtt.stolen_usable_size);
+ dev_priv->ggtt.stolen_usable_size);
}
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
I85X_DRB3, &tmp);
tom = tmp * MB(32);
- base = tom - tseg_size - dev_priv->gtt.stolen_size;
+ base = tom - tseg_size - dev_priv->ggtt.stolen_size;
} else if (IS_845G(dev)) {
u32 tseg_size = 0;
u32 tom;
I830_DRB3, &tmp);
tom = tmp * MB(32);
- base = tom - tseg_size - dev_priv->gtt.stolen_size;
+ base = tom - tseg_size - dev_priv->ggtt.stolen_size;
} else if (IS_I830(dev)) {
u32 tseg_size = 0;
u32 tom;
I830_DRB3, &tmp);
tom = tmp * MB(32);
- base = tom - tseg_size - dev_priv->gtt.stolen_size;
+ base = tom - tseg_size - dev_priv->ggtt.stolen_size;
}
if (base == 0)
struct {
u32 start, end;
} stolen[2] = {
- { .start = base, .end = base + dev_priv->gtt.stolen_size, },
- { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+ { .start = base, .end = base + dev_priv->ggtt.stolen_size, },
+ { .start = base, .end = base + dev_priv->ggtt.stolen_size, },
};
u64 gtt_start, gtt_end;
(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
gtt_start &= PGTBL_ADDRESS_LO_MASK;
- gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
+ gtt_end = gtt_start + gtt_total_entries(dev_priv->ggtt) * 4;
if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
stolen[0].end = gtt_start;
if (stolen[0].end - stolen[0].start >
stolen[1].end - stolen[1].start) {
base = stolen[0].start;
- dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
+ dev_priv->ggtt.stolen_size = stolen[0].end - stolen[0].start;
} else {
base = stolen[1].start;
- dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
+ dev_priv->ggtt.stolen_size = stolen[1].end - stolen[1].start;
}
if (stolen[0].start != stolen[1].start ||
(unsigned long long) gtt_start,
(unsigned long long) gtt_end - 1);
DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
- base, base + (u32) dev_priv->gtt.stolen_size - 1);
+ base, base + (u32) dev_priv->ggtt.stolen_size - 1);
}
}
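/*
 * A worked sketch of the trimming above: the GTT page tables
 * [gtt_start, gtt_end) may overlap stolen memory, so carve them out
 * and keep the larger leftover piece. Plain C; the values in the
 * comments are examples only.
 */
static u32 example_trim_stolen(u32 base, u32 stolen_size,
			       u32 gtt_start, u32 gtt_end, u32 *out_size)
{
	struct { u32 start, end; } s[2] = {
		{ base, base + stolen_size },	/* candidate below the PTEs */
		{ base, base + stolen_size },	/* candidate above the PTEs */
	};

	if (gtt_start >= s[0].start && gtt_start < s[0].end)
		s[0].end = gtt_start;	/* e.g. 32M stolen -> 0..28M kept */
	if (gtt_end > s[1].start && gtt_end <= s[1].end)
		s[1].start = gtt_end;	/* e.g. 30M..32M kept */

	if (s[0].end - s[0].start > s[1].end - s[1].start) {
		*out_size = s[0].end - s[0].start;
		return s[0].start;
	}
	*out_size = s[1].end - s[1].start;
	return s[1].start;
}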
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
- r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+ r = devm_request_mem_region(dev->dev, base, dev_priv->ggtt.stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
/*
* reservation starting from 1 instead of 0.
*/
r = devm_request_mem_region(dev->dev, base + 1,
- dev_priv->gtt.stolen_size - 1,
+ dev_priv->ggtt.stolen_size - 1,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen
*/
if (r == NULL && !IS_GEN3(dev)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
- base, base + (uint32_t)dev_priv->gtt.stolen_size);
+ base, base + (uint32_t)dev_priv->ggtt.stolen_size);
base = 0;
}
}
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
unsigned long stolen_top = dev_priv->mm.stolen_base +
- dev_priv->gtt.stolen_size;
+ dev_priv->ggtt.stolen_size;
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
unsigned long stolen_top;
- stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+ stolen_top = dev_priv->mm.stolen_base + dev_priv->ggtt.stolen_size;
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
}
#endif
- if (dev_priv->gtt.stolen_size == 0)
+ if (dev_priv->ggtt.stolen_size == 0)
return 0;
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0)
return 0;
- stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+ stolen_top = dev_priv->mm.stolen_base + dev_priv->ggtt.stolen_size;
switch (INTEL_INFO(dev_priv)->gen) {
case 2:
return 0;
}
- dev_priv->gtt.stolen_reserved_base = reserved_base;
- dev_priv->gtt.stolen_reserved_size = reserved_size;
+ dev_priv->ggtt.stolen_reserved_base = reserved_base;
+ dev_priv->ggtt.stolen_reserved_size = reserved_size;
/* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
- dev_priv->gtt.stolen_size >> 10,
- (dev_priv->gtt.stolen_size - reserved_total) >> 10);
+ dev_priv->ggtt.stolen_size >> 10,
+ (dev_priv->ggtt.stolen_size - reserved_total) >> 10);
- dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
+ dev_priv->ggtt.stolen_usable_size = dev_priv->ggtt.stolen_size -
reserved_total;
/*
* i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
* problem later.
*/
- drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
+ drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->ggtt.stolen_usable_size);
return 0;
}
struct scatterlist *sg;
DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
- BUG_ON(offset > dev_priv->gtt.stolen_size - size);
+ BUG_ON(offset > dev_priv->ggtt.stolen_size - size);
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *ggtt = &dev_priv->gtt.base;
+ struct i915_address_space *ggtt = &dev_priv->ggtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
vma = i915_gem_obj_to_ggtt(src);
use_ggtt = (src->cache_level == I915_CACHE_NONE &&
vma && (vma->bound & GLOBAL_BIND) &&
- reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
+ reloc_offset + num_pages * PAGE_SIZE <= dev_priv->ggtt.mappable_end);
/* Cannot access stolen address directly, try to use the aperture */
if (src->stolen) {
goto unwind;
reloc_offset = i915_gem_obj_ggtt_offset(src);
- if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
+ if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->ggtt.mappable_end)
goto unwind;
}
* captures what the GPU read.
*/
- s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ s = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
return NULL;
}
#define i915_error_ggtt_object_create(dev_priv, src) \
- i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
+ i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma)
vm = request->ctx && request->ctx->ppgtt ?
&request->ctx->ppgtt->base :
- &dev_priv->gtt.base;
+ &dev_priv->ggtt.base;
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
int intel_vgt_balloon(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+ struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;
unsigned long mappable_base, mappable_size, mappable_end;
unmappable_base, unmappable_size / 1024);
if (mappable_base < ggtt_vm->start ||
- mappable_end > dev_priv->gtt.mappable_end ||
- unmappable_base < dev_priv->gtt.mappable_end ||
+ mappable_end > dev_priv->ggtt.mappable_end ||
+ unmappable_base < dev_priv->ggtt.mappable_end ||
unmappable_end > ggtt_vm_end) {
DRM_ERROR("Invalid ballooning configuration!\n");
return -EINVAL;
}
/* Unmappable graphic memory ballooning */
- if (unmappable_base > dev_priv->gtt.mappable_end) {
+ if (unmappable_base > dev_priv->ggtt.mappable_end) {
ret = vgt_balloon_space(&ggtt_vm->mm,
&bl_info.space[2],
- dev_priv->gtt.mappable_end,
+ dev_priv->ggtt.mappable_end,
unmappable_base);
if (ret)
goto err;
}
- if (mappable_end < dev_priv->gtt.mappable_end) {
+ if (mappable_end < dev_priv->ggtt.mappable_end) {
ret = vgt_balloon_space(&ggtt_vm->mm,
&bl_info.space[1],
mappable_end,
- dev_priv->gtt.mappable_end);
+ dev_priv->ggtt.mappable_end);
if (ret)
goto err;
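/*
 * The invariant the ballooning checks above enforce, restated as one
 * predicate: the host's mappable window must sit inside
 * [ggtt_start, mappable_limit) and the unmappable window inside
 * [mappable_limit, ggtt_end). A minimal sketch with illustrative
 * parameter names.
 */
static bool example_balloon_config_ok(u64 ggtt_start, u64 ggtt_end,
				      u64 mappable_limit,
				      u64 m_base, u64 m_end,
				      u64 u_base, u64 u_end)
{
	return m_base >= ggtt_start && m_end <= mappable_limit &&
	       u_base >= mappable_limit && u_end <= ggtt_end;
}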
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
- if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
+ if (size_aligned * 2 > dev_priv->ggtt.stolen_usable_size)
return false;
mutex_lock(&dev->struct_mutex);
dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
}
- dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
+ dev->mode_config.fb_base = dev_priv->ggtt.mappable_base;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
INTEL_INFO(dev)->num_pipes,
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(dev_priv) ||
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
+ end = dev_priv->ggtt.stolen_size - 8 * 1024 * 1024;
else
- end = dev_priv->gtt.stolen_usable_size;
+ end = dev_priv->ggtt.stolen_usable_size;
/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
- if (size * 2 < dev_priv->gtt.stolen_usable_size)
+ if (size * 2 < dev_priv->ggtt.stolen_usable_size)
obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL)
obj = i915_gem_alloc_object(dev, size);
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = dev->mode_config.fb_base;
- info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
+ info->apertures->ranges[0].size = dev_priv->ggtt.mappable_end;
info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
info->fix.smem_len = size;
info->screen_base =
- ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
+ ioremap_wc(dev_priv->ggtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
size);
if (!info->screen_base) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_wc(dev_priv->gtt.mappable,
+ regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs;
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs;
* for this check.
*/
rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
- if (!((rc6_ctx_base >= dev_priv->gtt.stolen_reserved_base) &&
- (rc6_ctx_base + PAGE_SIZE <= dev_priv->gtt.stolen_reserved_base +
- dev_priv->gtt.stolen_reserved_size))) {
+ if (!((rc6_ctx_base >= dev_priv->ggtt.stolen_reserved_base) &&
+ (rc6_ctx_base + PAGE_SIZE <= dev_priv->ggtt.stolen_reserved_base +
+ dev_priv->ggtt.stolen_reserved_size))) {
DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
enable_rc6 = false;
}
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long pctx_paddr, paddr;
- struct i915_gtt *gtt = &dev_priv->gtt;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
u32 pcbr;
int pctx_size = 32*1024;
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
paddr = (dev_priv->mm.stolen_base +
- (gtt->stolen_size - pctx_size));
+ (ggtt->stolen_size - pctx_size));
pctx_paddr = (paddr & (~4095));
I915_WRITE(VLV_PCBR, pctx_paddr);
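/*
 * The PCBR fixup arithmetic above, spelled out: place the 32 KiB power
 * context at the top of stolen memory and round down to a 4 KiB
 * boundary before writing PCBR. The values in the comment are
 * examples only.
 */
static unsigned long example_pctx_paddr(unsigned long stolen_base,
					unsigned long stolen_size,
					int pctx_size)
{
	/* e.g. base 0x7b000000 + (64 MiB - 32 KiB) */
	unsigned long paddr = stolen_base + (stolen_size - pctx_size);

	return paddr & ~4095UL;	/* PCBR requires page alignment */
}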
/* Access through the GTT requires the device to be awake. */
assert_rpm_wakelock_held(dev_priv);
- ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+ ringbuf->virtual_start = ioremap_wc(dev_priv->ggtt.mappable_base +
i915_gem_obj_ggtt_offset(obj), ringbuf->size);
if (ringbuf->virtual_start == NULL) {
i915_gem_object_ggtt_unpin(obj);