drm/amdgpu: move GART recovery into GTT manager v2
author     Christian König <christian.koenig@amd.com>
           Mon, 16 Oct 2017 14:50:32 +0000 (16:50 +0200)
committer  Alex Deucher <alexander.deucher@amd.com>
           Mon, 4 Dec 2017 21:41:33 +0000 (16:41 -0500)
The GTT manager handles the GART address space anyway, so it is
completely pointless to keep the same information around twice.

v2: rebased
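
In short, condensed from the hunks below for reference (no new code,
just the core of the change gathered in one place): each GTT
allocation now carries a back pointer to its buffer object, so GART
recovery after a GPU reset walks the manager's drm_mm and re-binds
each BO, instead of maintaining a separate adev->gtt_list:

    /* New node type: a drm_mm_node plus the BO that owns it. */
    struct amdgpu_gtt_node {
            struct drm_mm_node node;
            struct ttm_buffer_object *tbo;
    };

    /* Walk all allocated GTT nodes and re-bind their pages to GART. */
    int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
    {
            struct amdgpu_gtt_mgr *mgr = man->priv;
            struct amdgpu_gtt_node *node;
            struct drm_mm_node *mm_node;
            int r = 0;

            spin_lock(&mgr->lock);
            drm_mm_for_each_node(mm_node, &mgr->mm) {
                    node = container_of(mm_node, struct amdgpu_gtt_node, node);
                    r = amdgpu_ttm_recover_gart(node->tbo);
                    if (r)
                            break;
            }
            spin_unlock(&mgr->lock);

            return r;
    }

Callers (the gpu recovery and SR-IOV reset paths) now pass the
TTM_PL_TT manager directly:

    r = amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);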

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index f25d246395a161fe15fde46682f022e1e9b42dc0..d11967a5c39258119936859daf02bb3c896cdc90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1630,9 +1630,6 @@ struct amdgpu_device {
        /* link all shadow bo */
        struct list_head                shadow_list;
        struct mutex                    shadow_list_lock;
-       /* link all gtt */
-       spinlock_t                      gtt_list_lock;
-       struct list_head                gtt_list;
        /* keep an lru list of rings by HW IP */
        struct list_head                ring_lru_list;
        spinlock_t                      ring_lru_list_lock;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9d4e0b88b101201d3b8ee2579d0a485fc21662f5..7af0d5d8cb1df81e6292db0afcd7784bc7a442f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2180,9 +2180,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        INIT_LIST_HEAD(&adev->shadow_list);
        mutex_init(&adev->shadow_list_lock);
 
-       INIT_LIST_HEAD(&adev->gtt_list);
-       spin_lock_init(&adev->gtt_list_lock);
-
        INIT_LIST_HEAD(&adev->ring_lru_list);
        spin_lock_init(&adev->ring_lru_list_lock);
 
@@ -2877,7 +2874,8 @@ retry:
                                atomic_inc(&adev->vram_lost_counter);
                        }
 
-                       r = amdgpu_ttm_recover_gart(adev);
+                       r = amdgpu_gtt_mgr_recover(
+                               &adev->mman.bdev.man[TTM_PL_TT]);
                        if (r)
                                goto out;
 
@@ -2939,7 +2937,7 @@ static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags,
                goto error;
 
        /* we need recover gart prior to run SMC/CP/SDMA resume */
-       amdgpu_ttm_recover_gart(adev);
+       amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
 
        /* now we are okay to resume SMC/CP/SDMA */
        r = amdgpu_sriov_reinit_late(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f7669dc6909b9701af47debf307c64fb74a936da..e14ab34d8262418084abdafe4015ad5ddee5c0a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -31,6 +31,11 @@ struct amdgpu_gtt_mgr {
        atomic64_t available;
 };
 
+struct amdgpu_gtt_node {
+       struct drm_mm_node node;
+       struct ttm_buffer_object *tbo;
+};
+
 /**
  * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
  *
@@ -87,9 +92,9 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
  */
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
 {
-       struct drm_mm_node *node = mem->mm_node;
+       struct amdgpu_gtt_node *node = mem->mm_node;
 
-       return (node->start != AMDGPU_BO_INVALID_OFFSET);
+       return (node->node.start != AMDGPU_BO_INVALID_OFFSET);
 }
 
 /**
@@ -109,7 +114,7 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
        struct amdgpu_gtt_mgr *mgr = man->priv;
-       struct drm_mm_node *node = mem->mm_node;
+       struct amdgpu_gtt_node *node = mem->mm_node;
        enum drm_mm_insert_mode mode;
        unsigned long fpfn, lpfn;
        int r;
@@ -132,13 +137,13 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
                mode = DRM_MM_INSERT_HIGH;
 
        spin_lock(&mgr->lock);
-       r = drm_mm_insert_node_in_range(&mgr->mm, node,
-                                       mem->num_pages, mem->page_alignment, 0,
-                                       fpfn, lpfn, mode);
+       r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
+                                       mem->page_alignment, 0, fpfn, lpfn,
+                                       mode);
        spin_unlock(&mgr->lock);
 
        if (!r)
-               mem->start = node->start;
+               mem->start = node->node.start;
 
        return r;
 }
@@ -159,7 +164,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
                              struct ttm_mem_reg *mem)
 {
        struct amdgpu_gtt_mgr *mgr = man->priv;
-       struct drm_mm_node *node;
+       struct amdgpu_gtt_node *node;
        int r;
 
        spin_lock(&mgr->lock);
@@ -177,8 +182,9 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
                goto err_out;
        }
 
-       node->start = AMDGPU_BO_INVALID_OFFSET;
-       node->size = mem->num_pages;
+       node->node.start = AMDGPU_BO_INVALID_OFFSET;
+       node->node.size = mem->num_pages;
+       node->tbo = tbo;
        mem->mm_node = node;
 
        if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
@@ -190,7 +196,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
                        goto err_out;
                }
        } else {
-               mem->start = node->start;
+               mem->start = node->node.start;
        }
 
        return 0;
@@ -214,14 +220,14 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
                               struct ttm_mem_reg *mem)
 {
        struct amdgpu_gtt_mgr *mgr = man->priv;
-       struct drm_mm_node *node = mem->mm_node;
+       struct amdgpu_gtt_node *node = mem->mm_node;
 
        if (!node)
                return;
 
        spin_lock(&mgr->lock);
-       if (node->start != AMDGPU_BO_INVALID_OFFSET)
-               drm_mm_remove_node(node);
+       if (node->node.start != AMDGPU_BO_INVALID_OFFSET)
+               drm_mm_remove_node(&node->node);
        spin_unlock(&mgr->lock);
        atomic64_add(mem->num_pages, &mgr->available);
 
@@ -244,6 +250,25 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
        return (result > 0 ? result : 0) * PAGE_SIZE;
 }
 
+int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
+{
+       struct amdgpu_gtt_mgr *mgr = man->priv;
+       struct amdgpu_gtt_node *node;
+       struct drm_mm_node *mm_node;
+       int r = 0;
+
+       spin_lock(&mgr->lock);
+       drm_mm_for_each_node(mm_node, &mgr->mm) {
+               node = container_of(mm_node, struct amdgpu_gtt_node, node);
+               r = amdgpu_ttm_recover_gart(node->tbo);
+               if (r)
+                       break;
+       }
+       spin_unlock(&mgr->lock);
+
+       return r;
+}
+
 /**
  * amdgpu_gtt_mgr_debug - dump VRAM table
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 3d02c2dd06e5532fe4e8f3c441e4e2d4c4825eb7..34dbe7afb6002f58149be5401e03541bea0f2208 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -689,7 +689,6 @@ struct amdgpu_ttm_tt {
        struct list_head        guptasks;
        atomic_t                mmu_invalidations;
        uint32_t                last_set_pages;
-       struct list_head        list;
 };
 
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
@@ -865,21 +864,14 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
                return 0;
        }
 
-       spin_lock(&gtt->adev->gtt_list_lock);
        flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
        gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
        r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
                ttm->pages, gtt->ttm.dma_address, flags);
 
-       if (r) {
+       if (r)
                DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
                          ttm->num_pages, gtt->offset);
-               goto error_gart_bind;
-       }
-
-       list_add_tail(&gtt->list, &gtt->adev->gtt_list);
-error_gart_bind:
-       spin_unlock(&gtt->adev->gtt_list_lock);
        return r;
 }
 
@@ -920,29 +912,23 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo)
        return r;
 }
 
-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
+int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 {
-       struct amdgpu_ttm_tt *gtt, *tmp;
-       struct ttm_mem_reg bo_mem;
+       struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+       struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
        uint64_t flags;
        int r;
 
-       bo_mem.mem_type = TTM_PL_TT;
-       spin_lock(&adev->gtt_list_lock);
-       list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
-               flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
-               r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
-                                    gtt->ttm.ttm.pages, gtt->ttm.dma_address,
-                                    flags);
-               if (r) {
-                       spin_unlock(&adev->gtt_list_lock);
-                       DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
-                                 gtt->ttm.ttm.num_pages, gtt->offset);
-                       return r;
-               }
-       }
-       spin_unlock(&adev->gtt_list_lock);
-       return 0;
+       if (!gtt)
+               return 0;
+
+       flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
+       r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+                            gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
+       if (r)
+               DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+                         gtt->ttm.ttm.num_pages, gtt->offset);
+       return r;
 }
 
 static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
@@ -957,16 +943,10 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
                return 0;
 
        /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
-       spin_lock(&gtt->adev->gtt_list_lock);
        r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
-       if (r) {
+       if (r)
                DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
                          gtt->ttm.ttm.num_pages, gtt->offset);
-               goto error_unbind;
-       }
-       list_del_init(&gtt->list);
-error_unbind:
-       spin_unlock(&gtt->adev->gtt_list_lock);
        return r;
 }
 
@@ -1003,7 +983,6 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
                kfree(gtt);
                return NULL;
        }
-       INIT_LIST_HEAD(&gtt->list);
        return &gtt->ttm.ttm;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 016d2af05d34dbe453b7841203ed07c7e0c42e44..d2985def416804fef4653328aa4bb20f9e13480c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -69,6 +69,7 @@ extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
 
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
+int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
 
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
@@ -91,7 +92,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 int amdgpu_ttm_bind(struct ttm_buffer_object *bo);
-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
+int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
 
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);