git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blobdiff - drivers/gpu/drm/msm/msm_gem.c
Merge branch 'msm-fixes-v5.13-rc6' into msm-next-redo
[mirror_ubuntu-jammy-kernel.git] / drivers / gpu / drm / msm / msm_gem.c
index 369d91e6361ecd2723f245f1bd515f0f7044498e..60eacd92e4d290ebeb6aacc1dfcabcd45860aa13 100644 (file)
@@ -211,6 +211,13 @@ void msm_gem_put_pages(struct drm_gem_object *obj)
        msm_gem_unlock(obj);
 }
 
+static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
+{
+       if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+               return pgprot_writecombine(prot);
+       return prot;
+}
+
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
                struct vm_area_struct *vma)
 {
@@ -218,22 +225,7 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
 
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;
-
-       if (msm_obj->flags & MSM_BO_WC) {
-               vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-       } else if (msm_obj->flags & MSM_BO_UNCACHED) {
-               vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
-       } else {
-               /*
-                * Shunt off cached objs to shmem file so they have their own
-                * address_space (so unmap_mapping_range does what we want,
-                * in particular in the case of mmap'd dmabufs)
-                */
-               vma->vm_pgoff = 0;
-               vma_set_file(vma, obj->filp);
-
-               vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-       }
+       vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
 
        return 0;
 }
@@ -451,6 +443,9 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
        if (msm_obj->flags & MSM_BO_MAP_PRIV)
                prot |= IOMMU_PRIV;
 
+       if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
+               prot |= IOMMU_CACHE;
+
        GEM_WARN_ON(!msm_gem_is_locked(obj));
 
        if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
@@ -653,7 +648,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
                        goto fail;
                }
                msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
-                               VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+                               VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
                if (msm_obj->vaddr == NULL) {
                        ret = -ENOMEM;
                        goto fail;
@@ -1164,6 +1159,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
                uint32_t size, uint32_t flags,
                struct drm_gem_object **obj)
 {
+       struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj;
 
        switch (flags & MSM_BO_CACHE_MASK) {
@@ -1171,6 +1167,10 @@ static int msm_gem_new_impl(struct drm_device *dev,
        case MSM_BO_CACHED:
        case MSM_BO_WC:
                break;
+       case MSM_BO_CACHED_COHERENT:
+               if (priv->has_cached_coherent)
+                       break;
+               /* fallthrough */
        default:
                DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
                                (flags & MSM_BO_CACHE_MASK));