git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/commitdiff
drm/panfrost: Make sure MMU context lifetime is not bound to panfrost_priv
authorBoris Brezillon <boris.brezillon@collabora.com>
Mon, 21 Jun 2021 13:38:56 +0000 (15:38 +0200)
committerStefan Bader <stefan.bader@canonical.com>
Fri, 5 Nov 2021 09:18:59 +0000 (10:18 +0100)
BugLink: https://bugs.launchpad.net/bugs/1947781
commit 7fdc48cc63a30fa3480d18bdd8c5fff2b9b15212 upstream.

Jobs can be in-flight when the file descriptor is closed (either because
the process did not terminate properly, or because it didn't wait for
all GPU jobs to be finished), and apparently panfrost_job_close() does
not cancel already running jobs. Let's refcount the MMU context object
so its lifetime is no longer bound to the FD lifetime and running jobs
can finish properly without generating spurious page faults.

Reported-by: Icecream95 <ixn@keemail.me>
Fixes: 7282f7645d06 ("drm/panfrost: Implement per FD address spaces")
Cc: <stable@vger.kernel.org>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210621133907.1683899-2-boris.brezillon@collabora.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Kelsey Skunberg <kelsey.skunberg@canonical.com>
drivers/gpu/drm/panfrost/panfrost_device.h
drivers/gpu/drm/panfrost/panfrost_drv.c
drivers/gpu/drm/panfrost/panfrost_gem.c
drivers/gpu/drm/panfrost/panfrost_job.c
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/panfrost/panfrost_mmu.h

index 597cf1459b0a8d31ffb27c0388cd4da9f03d2cfe..4c6bdea5537b9ba7b39f2acda487fb231610e8ea 100644 (file)
@@ -120,8 +120,12 @@ struct panfrost_device {
 };
 
 struct panfrost_mmu {
+       struct panfrost_device *pfdev;
+       struct kref refcount;
        struct io_pgtable_cfg pgtbl_cfg;
        struct io_pgtable_ops *pgtbl_ops;
+       struct drm_mm mm;
+       spinlock_t mm_lock;
        int as;
        atomic_t as_count;
        struct list_head list;
@@ -132,9 +136,7 @@ struct panfrost_file_priv {
 
        struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];
 
-       struct panfrost_mmu mmu;
-       struct drm_mm mm;
-       spinlock_t mm_lock;
+       struct panfrost_mmu *mmu;
 };
 
 static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev)
index 83a461bdeea84433ee64ba76683309f89c877314..b2aa8e05031470235acb0a6715b0d8f51db5dfd6 100644 (file)
@@ -417,7 +417,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
                 * anyway, so let's not bother.
                 */
                if (!list_is_singular(&bo->mappings.list) ||
-                   WARN_ON_ONCE(first->mmu != &priv->mmu)) {
+                   WARN_ON_ONCE(first->mmu != priv->mmu)) {
                        ret = -EINVAL;
                        goto out_unlock_mappings;
                }
@@ -449,32 +449,6 @@ int panfrost_unstable_ioctl_check(void)
        return 0;
 }
 
-#define PFN_4G         (SZ_4G >> PAGE_SHIFT)
-#define PFN_4G_MASK    (PFN_4G - 1)
-#define PFN_16M                (SZ_16M >> PAGE_SHIFT)
-
-static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
-                                        unsigned long color,
-                                        u64 *start, u64 *end)
-{
-       /* Executable buffers can't start or end on a 4GB boundary */
-       if (!(color & PANFROST_BO_NOEXEC)) {
-               u64 next_seg;
-
-               if ((*start & PFN_4G_MASK) == 0)
-                       (*start)++;
-
-               if ((*end & PFN_4G_MASK) == 0)
-                       (*end)--;
-
-               next_seg = ALIGN(*start, PFN_4G);
-               if (next_seg - *start <= PFN_16M)
-                       *start = next_seg + 1;
-
-               *end = min(*end, ALIGN(*start, PFN_4G) - 1);
-       }
-}
-
 static int
 panfrost_open(struct drm_device *dev, struct drm_file *file)
 {
@@ -489,15 +463,11 @@ panfrost_open(struct drm_device *dev, struct drm_file *file)
        panfrost_priv->pfdev = pfdev;
        file->driver_priv = panfrost_priv;
 
-       spin_lock_init(&panfrost_priv->mm_lock);
-
-       /* 4G enough for now. can be 48-bit */
-       drm_mm_init(&panfrost_priv->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
-       panfrost_priv->mm.color_adjust = panfrost_drm_mm_color_adjust;
-
-       ret = panfrost_mmu_pgtable_alloc(panfrost_priv);
-       if (ret)
-               goto err_pgtable;
+       panfrost_priv->mmu = panfrost_mmu_ctx_create(pfdev);
+       if (IS_ERR(panfrost_priv->mmu)) {
+               ret = PTR_ERR(panfrost_priv->mmu);
+               goto err_free;
+       }
 
        ret = panfrost_job_open(panfrost_priv);
        if (ret)
@@ -506,9 +476,8 @@ panfrost_open(struct drm_device *dev, struct drm_file *file)
        return 0;
 
 err_job:
-       panfrost_mmu_pgtable_free(panfrost_priv);
-err_pgtable:
-       drm_mm_takedown(&panfrost_priv->mm);
+       panfrost_mmu_ctx_put(panfrost_priv->mmu);
+err_free:
        kfree(panfrost_priv);
        return ret;
 }
@@ -521,8 +490,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file)
        panfrost_perfcnt_close(file);
        panfrost_job_close(panfrost_priv);
 
-       panfrost_mmu_pgtable_free(panfrost_priv);
-       drm_mm_takedown(&panfrost_priv->mm);
+       panfrost_mmu_ctx_put(panfrost_priv->mmu);
        kfree(panfrost_priv);
 }
 
index 3e0723bc36bda98125650c00b0782e7ff5b73b0d..23377481f4e3107befec73facb21f24fc7bdeb2b 100644 (file)
@@ -60,7 +60,7 @@ panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
 
        mutex_lock(&bo->mappings.lock);
        list_for_each_entry(iter, &bo->mappings.list, node) {
-               if (iter->mmu == &priv->mmu) {
+               if (iter->mmu == priv->mmu) {
                        kref_get(&iter->refcount);
                        mapping = iter;
                        break;
@@ -74,16 +74,13 @@ panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
 static void
 panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
 {
-       struct panfrost_file_priv *priv;
-
        if (mapping->active)
                panfrost_mmu_unmap(mapping);
 
-       priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
-       spin_lock(&priv->mm_lock);
+       spin_lock(&mapping->mmu->mm_lock);
        if (drm_mm_node_allocated(&mapping->mmnode))
                drm_mm_remove_node(&mapping->mmnode);
-       spin_unlock(&priv->mm_lock);
+       spin_unlock(&mapping->mmu->mm_lock);
 }
 
 static void panfrost_gem_mapping_release(struct kref *kref)
@@ -94,6 +91,7 @@ static void panfrost_gem_mapping_release(struct kref *kref)
 
        panfrost_gem_teardown_mapping(mapping);
        drm_gem_object_put(&mapping->obj->base.base);
+       panfrost_mmu_ctx_put(mapping->mmu);
        kfree(mapping);
 }
 
@@ -143,11 +141,11 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
        else
                align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
 
-       mapping->mmu = &priv->mmu;
-       spin_lock(&priv->mm_lock);
-       ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
+       mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
+       spin_lock(&mapping->mmu->mm_lock);
+       ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
                                         size >> PAGE_SHIFT, align, color, 0);
-       spin_unlock(&priv->mm_lock);
+       spin_unlock(&mapping->mmu->mm_lock);
        if (ret)
                goto err;
 
@@ -176,7 +174,7 @@ void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
 
        mutex_lock(&bo->mappings.lock);
        list_for_each_entry(iter, &bo->mappings.list, node) {
-               if (iter->mmu == &priv->mmu) {
+               if (iter->mmu == priv->mmu) {
                        mapping = iter;
                        list_del(&iter->node);
                        break;
index 04e6f6f9b742ef6c5afe9cf41c1705d2b63f5e84..7e1a5664d45258d2b9ba06c2df3121cc2a31803b 100644 (file)
@@ -165,7 +165,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
                return;
        }
 
-       cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
+       cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);
 
        job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
        job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
@@ -524,7 +524,7 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
                        if (job) {
                                pfdev->jobs[j] = NULL;
 
-                               panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
+                               panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
                                panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
 
                                dma_fence_signal_locked(job->done_fence);
index 21e552d1ac71ac889b78685da0869dead0d01ea5..44d9cb6218583b9d00262e67deac090cefb2b448 100644 (file)
@@ -1,5 +1,8 @@
 // SPDX-License-Identifier:    GPL-2.0
 /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+
+#include <drm/panfrost_drm.h>
+
 #include <linux/atomic.h>
 #include <linux/bitfield.h>
 #include <linux/delay.h>
@@ -337,7 +340,7 @@ static void mmu_tlb_inv_context_s1(void *cookie)
 
 static void mmu_tlb_sync_context(void *cookie)
 {
-       //struct panfrost_device *pfdev = cookie;
+       //struct panfrost_mmu *mmu = cookie;
        // TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
 }
 
@@ -352,57 +355,10 @@ static const struct iommu_flush_ops mmu_tlb_ops = {
        .tlb_flush_walk = mmu_tlb_flush_walk,
 };
 
-int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
-{
-       struct panfrost_mmu *mmu = &priv->mmu;
-       struct panfrost_device *pfdev = priv->pfdev;
-
-       INIT_LIST_HEAD(&mmu->list);
-       mmu->as = -1;
-
-       mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
-               .pgsize_bitmap  = SZ_4K | SZ_2M,
-               .ias            = FIELD_GET(0xff, pfdev->features.mmu_features),
-               .oas            = FIELD_GET(0xff00, pfdev->features.mmu_features),
-               .coherent_walk  = pfdev->coherent,
-               .tlb            = &mmu_tlb_ops,
-               .iommu_dev      = pfdev->dev,
-       };
-
-       mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
-                                             priv);
-       if (!mmu->pgtbl_ops)
-               return -EINVAL;
-
-       return 0;
-}
-
-void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
-{
-       struct panfrost_device *pfdev = priv->pfdev;
-       struct panfrost_mmu *mmu = &priv->mmu;
-
-       spin_lock(&pfdev->as_lock);
-       if (mmu->as >= 0) {
-               pm_runtime_get_noresume(pfdev->dev);
-               if (pm_runtime_active(pfdev->dev))
-                       panfrost_mmu_disable(pfdev, mmu->as);
-               pm_runtime_put_autosuspend(pfdev->dev);
-
-               clear_bit(mmu->as, &pfdev->as_alloc_mask);
-               clear_bit(mmu->as, &pfdev->as_in_use_mask);
-               list_del(&mmu->list);
-       }
-       spin_unlock(&pfdev->as_lock);
-
-       free_io_pgtable_ops(mmu->pgtbl_ops);
-}
-
 static struct panfrost_gem_mapping *
 addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
 {
        struct panfrost_gem_mapping *mapping = NULL;
-       struct panfrost_file_priv *priv;
        struct drm_mm_node *node;
        u64 offset = addr >> PAGE_SHIFT;
        struct panfrost_mmu *mmu;
@@ -415,11 +371,10 @@ addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
        goto out;
 
 found_mmu:
-       priv = container_of(mmu, struct panfrost_file_priv, mmu);
 
-       spin_lock(&priv->mm_lock);
+       spin_lock(&mmu->mm_lock);
 
-       drm_mm_for_each_node(node, &priv->mm) {
+       drm_mm_for_each_node(node, &mmu->mm) {
                if (offset >= node->start &&
                    offset < (node->start + node->size)) {
                        mapping = drm_mm_node_to_panfrost_mapping(node);
@@ -429,7 +384,7 @@ found_mmu:
                }
        }
 
-       spin_unlock(&priv->mm_lock);
+       spin_unlock(&mmu->mm_lock);
 out:
        spin_unlock(&pfdev->as_lock);
        return mapping;
@@ -542,6 +497,107 @@ err_bo:
        return ret;
 }
 
+static void panfrost_mmu_release_ctx(struct kref *kref)
+{
+       struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
+                                               refcount);
+       struct panfrost_device *pfdev = mmu->pfdev;
+
+       spin_lock(&pfdev->as_lock);
+       if (mmu->as >= 0) {
+               pm_runtime_get_noresume(pfdev->dev);
+               if (pm_runtime_active(pfdev->dev))
+                       panfrost_mmu_disable(pfdev, mmu->as);
+               pm_runtime_put_autosuspend(pfdev->dev);
+
+               clear_bit(mmu->as, &pfdev->as_alloc_mask);
+               clear_bit(mmu->as, &pfdev->as_in_use_mask);
+               list_del(&mmu->list);
+       }
+       spin_unlock(&pfdev->as_lock);
+
+       free_io_pgtable_ops(mmu->pgtbl_ops);
+       drm_mm_takedown(&mmu->mm);
+       kfree(mmu);
+}
+
+void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
+{
+       kref_put(&mmu->refcount, panfrost_mmu_release_ctx);
+}
+
+struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
+{
+       kref_get(&mmu->refcount);
+
+       return mmu;
+}
+
+#define PFN_4G         (SZ_4G >> PAGE_SHIFT)
+#define PFN_4G_MASK    (PFN_4G - 1)
+#define PFN_16M                (SZ_16M >> PAGE_SHIFT)
+
+static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
+                                        unsigned long color,
+                                        u64 *start, u64 *end)
+{
+       /* Executable buffers can't start or end on a 4GB boundary */
+       if (!(color & PANFROST_BO_NOEXEC)) {
+               u64 next_seg;
+
+               if ((*start & PFN_4G_MASK) == 0)
+                       (*start)++;
+
+               if ((*end & PFN_4G_MASK) == 0)
+                       (*end)--;
+
+               next_seg = ALIGN(*start, PFN_4G);
+               if (next_seg - *start <= PFN_16M)
+                       *start = next_seg + 1;
+
+               *end = min(*end, ALIGN(*start, PFN_4G) - 1);
+       }
+}
+
+struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
+{
+       struct panfrost_mmu *mmu;
+
+       mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
+       if (!mmu)
+               return ERR_PTR(-ENOMEM);
+
+       mmu->pfdev = pfdev;
+       spin_lock_init(&mmu->mm_lock);
+
+       /* 4G enough for now. can be 48-bit */
+       drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
+       mmu->mm.color_adjust = panfrost_drm_mm_color_adjust;
+
+       INIT_LIST_HEAD(&mmu->list);
+       mmu->as = -1;
+
+       mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
+               .pgsize_bitmap  = SZ_4K | SZ_2M,
+               .ias            = FIELD_GET(0xff, pfdev->features.mmu_features),
+               .oas            = FIELD_GET(0xff00, pfdev->features.mmu_features),
+               .coherent_walk  = pfdev->coherent,
+               .tlb            = &mmu_tlb_ops,
+               .iommu_dev      = pfdev->dev,
+       };
+
+       mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
+                                             mmu);
+       if (!mmu->pgtbl_ops) {
+               kfree(mmu);
+               return ERR_PTR(-EINVAL);
+       }
+
+       kref_init(&mmu->refcount);
+
+       return mmu;
+}
+
 static const char *access_type_name(struct panfrost_device *pfdev,
                u32 fault_status)
 {
index 44fc2edf63ce66a13dc49126ae40f49f2b1506c8..cc2a0d307febcd2971e8ed046500100b43f1c1d5 100644 (file)
@@ -18,7 +18,8 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev);
 u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
 void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
 
-int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv);
-void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv);
+struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu);
+void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu);
+struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev);
 
 #endif