return (unsigned long)busy_time;
}
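+/*
+ * a6xx needs its own ->create_address_space() so that the LLC (system
+ * cache) attributes can be set on the IOMMU domain before the MMU and
+ * address space are created on top of it.
+ */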
+static struct msm_gem_address_space *
+a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct iommu_domain *iommu;
+ struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
+ u64 start, size;
+
+ iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu)
+ return NULL;
+
+ /*
+ * This allows the GPU to set the bus attributes required to use the
+ * system cache on behalf of the IOMMU page table walker.
+ */
+ if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
+ adreno_set_llc_attributes(iommu);
+
+ mmu = msm_iommu_new(&pdev->dev, iommu);
+ if (IS_ERR(mmu)) {
+ iommu_domain_free(iommu);
+ return ERR_CAST(mmu);
+ }
+
+ /*
+ * Use the aperture start or SZ_16M, whichever is greater. This will
+ * ensure that we align with the allocated pagetable range while still
+ * allowing room in the lower 32 bits for GMEM and other mappings.
+ */
+ start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
+ size = iommu->geometry.aperture_end - start + 1;
+
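+ /* Limit the address space base to address bits [48:0] */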
+ aspace = msm_gem_address_space_create(mmu, "gpu",
+ start & GENMASK_ULL(48, 0), size);
+
+ if (IS_ERR(aspace))
+ mmu->funcs->destroy(mmu);
+
+ return aspace;
+}
+
static struct msm_gem_address_space *
a6xx_create_private_address_space(struct msm_gpu *gpu)
{
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
#endif
- .create_address_space = adreno_iommu_create_address_space,
+ .create_address_space = a6xx_create_address_space,
.create_private_address_space = a6xx_create_private_address_space,
.get_rptr = a6xx_get_rptr,
},
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
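+/*
+ * Mark the IOMMU page table walks as outer-cacheable write-back/
+ * write-allocate so that they can be cached in the system cache.
+ */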
+void adreno_set_llc_attributes(struct iommu_domain *iommu)
+{
+ struct io_pgtable_domain_attr pgtbl_cfg;
+
+ pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
+ iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+}
+
struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct iommu_domain *iommu;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
if (!iommu)
return NULL;
- if (adreno_is_a6xx(adreno_gpu)) {
- struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- struct io_pgtable_domain_attr pgtbl_cfg;
-
- /*
- * This allows GPU to set the bus attributes required to use system
- * cache on behalf of the iommu page table walker.
- */
- if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice)) {
- pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
- iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
- }
- }
-
mmu = msm_iommu_new(&pdev->dev, iommu);
if (IS_ERR(mmu)) {
iommu_domain_free(iommu);
return gpu->revn == 540;
}
-static inline bool adreno_is_a6xx(struct adreno_gpu *gpu)
-{
- return ((gpu->revn < 700 && gpu->revn > 599));
-}
-
static inline int adreno_is_a618(struct adreno_gpu *gpu)
{
return gpu->revn == 618;
adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev);
+void adreno_set_llc_attributes(struct iommu_domain *iommu);
+
/*
* For a5xx and a6xx targets load the zap shader that is used to pull the GPU
* out of secure mode