drm/amdgpu: cleanup static CSA handling
author     Christian König <christian.koenig@amd.com>
           Mon, 31 Jul 2017 13:32:40 +0000 (15:32 +0200)
committer  Alex Deucher <alexander.deucher@amd.com>
           Thu, 17 Aug 2017 19:46:05 +0000 (15:46 -0400)
Move the CSA bo_va from the VM to the fpriv structure.
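
In API terms, the helper no longer stashes its result inside the VM; the
caller now supplies the storage for the bo_va explicitly (here, the new
fpriv->csa_va field). A condensed before/after of the signature, taken
from the hunks below:

	/* before: result stored implicitly in vm->csa_bo_va */
	int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm);

	/* after: caller supplies the destination, e.g. &fpriv->csa_va */
	int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
				  struct amdgpu_bo_va **bo_va);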

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ad944aea0d4bb2eddce397da3306ba5452b44f72..1f915a5ce9ba836fa7c5ef60352d85096571641d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -748,6 +748,7 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 struct amdgpu_fpriv {
        struct amdgpu_vm        vm;
        struct amdgpu_bo_va     *prt_va;
+       struct amdgpu_bo_va     *csa_va;
        struct mutex            bo_list_lock;
        struct idr              bo_list_handles;
        struct amdgpu_ctx_mgr   ctx_mgr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7e71a511990ea6011302f59248c638463320033d..3c64248673ee0f099539c2eedacb8530aa166635 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -787,7 +787,8 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 
        if (amdgpu_sriov_vf(adev)) {
                struct dma_fence *f;
-               bo_va = vm->csa_bo_va;
+
+               bo_va = fpriv->csa_va;
                BUG_ON(!bo_va);
                r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 29cd5dabf8b58ffd95951410d32828cc44f9629d..1aac5821ac8f4d363005cd3d3f8519cc2fa9f889 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -843,7 +843,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
        }
 
        if (amdgpu_sriov_vf(adev)) {
-               r = amdgpu_map_static_csa(adev, &fpriv->vm);
+               r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
                if (r)
                        goto out_suspend;
        }
@@ -896,8 +896,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
        if (amdgpu_sriov_vf(adev)) {
                /* TODO: how to handle reserve failure */
                BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
-               amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
-               fpriv->vm.csa_bo_va = NULL;
+               amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
+               fpriv->csa_va = NULL;
                amdgpu_bo_unreserve(adev->virt.csa_obj);
        }
 
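Read together, the two amdgpu_kms.c hunks above pair the mapping and the
teardown around the new per-client field. A condensed sketch of the
lifecycle (the note that adev->virt.csa_obj outlives any single client is
an inference from the code, not stated in the patch):

	/* open (amdgpu_driver_open_kms): map the device-wide CSA object
	 * into this client's VM, recording the mapping in fpriv->csa_va */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
		if (r)
			goto out_suspend;
	}

	/* close (amdgpu_driver_postclose_kms): remove only this client's
	 * mapping; the shared adev->virt.csa_obj itself stays allocated */
	if (amdgpu_sriov_vf(adev)) {
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}
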
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 8a081e162d13cb6f6cc6b674fd82cd52180cb40c..89208456d36010b89cebdb58d1a4a567b5881023 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -46,14 +46,14 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
  * address within META_DATA init package to support SRIOV gfx preemption.
  */
 
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                         struct amdgpu_bo_va **bo_va)
 {
-       int r;
-       struct amdgpu_bo_va *bo_va;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        struct amdgpu_bo_list_entry pd;
        struct ttm_validate_buffer csa_tv;
+       int r;
 
        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&csa_tv.head);
@@ -69,34 +69,33 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                return r;
        }
 
-       bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
-       if (!bo_va) {
+       *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
+       if (!*bo_va) {
                ttm_eu_backoff_reservation(&ticket, &list);
                DRM_ERROR("failed to create bo_va for static CSA\n");
                return -ENOMEM;
        }
 
-       r = amdgpu_vm_alloc_pts(adev, bo_va->vm, AMDGPU_CSA_VADDR,
-                                  AMDGPU_CSA_SIZE);
+       r = amdgpu_vm_alloc_pts(adev, (*bo_va)->vm, AMDGPU_CSA_VADDR,
+                               AMDGPU_CSA_SIZE);
        if (r) {
                DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
-               amdgpu_vm_bo_rmv(adev, bo_va);
+               amdgpu_vm_bo_rmv(adev, *bo_va);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }
 
-       r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0,AMDGPU_CSA_SIZE,
-                                               AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
-                                               AMDGPU_PTE_EXECUTABLE);
+       r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
+                            AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+                            AMDGPU_PTE_EXECUTABLE);
 
        if (r) {
                DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
-               amdgpu_vm_bo_rmv(adev, bo_va);
+               amdgpu_vm_bo_rmv(adev, *bo_va);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }
 
-       vm->csa_bo_va = bo_va;
        ttm_eu_backoff_reservation(&ticket, &list);
        return 0;
 }
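
The patch keeps the function's original per-branch cleanup (rmv plus
backoff before each early return). As a sketch only, assuming identical
behaviour, the same unwinding could be expressed with goto labels, the
more common kernel error-handling idiom, which this patch does not adopt:

	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!*bo_va) {
		DRM_ERROR("failed to create bo_va for static CSA\n");
		r = -ENOMEM;
		goto error_backoff;
	}

	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->vm, AMDGPU_CSA_VADDR,
				AMDGPU_CSA_SIZE);
	if (r) {
		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
		goto error_rmv;
	}

	r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		goto error_rmv;
	}

	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;

error_rmv:
	amdgpu_vm_bo_rmv(adev, *bo_va);
error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);
	return r;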
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index e5b1baf387c1ffcd5a0b3c5546b697b94c97a011..afcfb8bcfb65edda5e37def8ace207e0ded84ae1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -90,7 +90,8 @@ static inline bool is_virtual_machine(void)
 
 struct amdgpu_vm;
 int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                         struct amdgpu_bo_va **bo_va);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 6e94cd2e610c8caaa5dd893980560bbb62d83599..9c309c5a86f1c1e08e970d175992017515eb6701 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -141,8 +141,6 @@ struct amdgpu_vm {
        u64                     client_id;
        /* dedicated to vm */
        struct amdgpu_vm_id     *reserved_vmid[AMDGPU_MAX_VMHUBS];
-       /* each VM will map on CSA */
-       struct amdgpu_bo_va *csa_bo_va;
 
        /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
        bool                    use_cpu_for_update;