drm/amdgpu: Make amdgpu_bo_reserve use uninterruptible waits for cleanup
Author:    Michel Dänzer <michel.daenzer@amd.com>
           Fri, 28 Apr 2017 08:28:14 +0000 (17:28 +0900)
Committer: Alex Deucher <alexander.deucher@amd.com>
           Fri, 28 Apr 2017 21:33:16 +0000 (17:33 -0400)
Some of these paths probably cannot be interrupted by a signal anyway.
Those that can would fail to clean up things if they actually got
interrupted.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
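
For context on what the flipped boolean actually toggles: in this era of the driver, amdgpu_bo_reserve() is a thin wrapper around ttm_bo_reserve(), and its second parameter (no_intr in the header at the time) selects between an interruptible and an uninterruptible wait on the buffer's reservation lock. With false, a pending signal can make the reserve return -ERESTARTSYS, and the cleanup path then silently skips the kunmap/unpin/unref that follows. The minimal sketch below shows the teardown pattern this series converges on; example_teardown and example_bo are illustrative placeholders, not code from the patch.

    /* Minimal sketch of the cleanup pattern; names are placeholders. */
    static void example_teardown(struct amdgpu_bo **example_bo)
    {
            int r;

            if (*example_bo == NULL)
                    return;

            /* true = no_intr: a signal cannot abort the reserve here */
            r = amdgpu_bo_reserve(*example_bo, true);
            if (likely(r == 0)) {
                    amdgpu_bo_kunmap(*example_bo);
                    amdgpu_bo_unpin(*example_bo);
                    amdgpu_bo_unreserve(*example_bo);
            }
            amdgpu_bo_unref(example_bo);
    }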
16 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index d46773b8f7e81c73547808dccb18b5a6eb201b56..21125a9452b83fd6c70edcf20e58ce211ac9704e 100644
@@ -123,7 +123,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
        int r;
 
        /* unpin of the old buffer */
-       r = amdgpu_bo_reserve(work->old_abo, false);
+       r = amdgpu_bo_reserve(work->old_abo, true);
        if (likely(r == 0)) {
                r = amdgpu_bo_unpin(work->old_abo);
                if (unlikely(r != 0)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index a48142d930c641437fa2695e4793e7b3a814ec46..236d9950221b62665e8728941faa5793fc757980 100644
@@ -112,7 +112,7 @@ static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
        struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
        int ret;
 
-       ret = amdgpu_bo_reserve(abo, false);
+       ret = amdgpu_bo_reserve(abo, true);
        if (likely(ret == 0)) {
                amdgpu_bo_kunmap(abo);
                amdgpu_bo_unpin(abo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 6d691abe889c82f94a1ff1eda95c8992916a8560..e7406ce7093cdbaf79b8c89fd723ca7e64cd86ee 100644
@@ -183,7 +183,7 @@ void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
        if (adev->gart.robj == NULL) {
                return;
        }
-       r = amdgpu_bo_reserve(adev->gart.robj, false);
+       r = amdgpu_bo_reserve(adev->gart.robj, true);
        if (likely(r == 0)) {
                amdgpu_bo_kunmap(adev->gart.robj);
                amdgpu_bo_unpin(adev->gart.robj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 21f616cdb2795ad5525e414ac1a70263fec03dcb..4f2b6679f72f28dfe397fc4d17bf57cf3916801e 100644
@@ -821,7 +821,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 
        if (amdgpu_sriov_vf(adev)) {
                /* TODO: how to handle reserve failure */
-               BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, false));
+               BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
                amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
                fpriv->vm.csa_bo_va = NULL;
                amdgpu_bo_unreserve(adev->virt.csa_obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 3826d5aea0a6a55d00d9aae2bda9f7b04489ec60..6bdc866570ab8d67575ac3734d1f67573978df79 100644
@@ -113,7 +113,7 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        int ret = 0;
 
-       ret = amdgpu_bo_reserve(bo, false);
+       ret = amdgpu_bo_reserve(bo, true);
        if (unlikely(ret != 0))
                return;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index de9f919ae33695a010eb67f8d1eda118ed5444de..5ca75a456ad2ad94e81773fc209e1977dc18aa9f 100644
@@ -130,7 +130,7 @@ int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
                return -EINVAL;
        }
 
-       r = amdgpu_bo_reserve(sa_manager->bo, false);
+       r = amdgpu_bo_reserve(sa_manager->bo, true);
        if (!r) {
                amdgpu_bo_kunmap(sa_manager->bo);
                amdgpu_bo_unpin(sa_manager->bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 24ca251f82ca2ea9314d23e4197b919385922523..7bb1bf76319a513e6bc5555f50e97cd3daa0d94c 100644
@@ -1198,7 +1198,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
                return;
        amdgpu_ttm_debugfs_fini(adev);
        if (adev->stollen_vga_memory) {
-               r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
+               r = amdgpu_bo_reserve(adev->stollen_vga_memory, true);
                if (r == 0) {
                        amdgpu_bo_unpin(adev->stollen_vga_memory);
                        amdgpu_bo_unreserve(adev->stollen_vga_memory);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index ba98d35340a3025e863d3b7d871ddcd179a66703..0cdeb6a2e4a0166d8f33a542e950e7c87abc11fa 100644
@@ -2230,7 +2230,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
        if (!atomic && fb && fb != crtc->primary->fb) {
                amdgpu_fb = to_amdgpu_framebuffer(fb);
                abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-               r = amdgpu_bo_reserve(abo, false);
+               r = amdgpu_bo_reserve(abo, true);
                if (unlikely(r != 0))
                        return r;
                amdgpu_bo_unpin(abo);
@@ -2589,7 +2589,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 unpin:
        if (amdgpu_crtc->cursor_bo) {
                struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-               ret = amdgpu_bo_reserve(aobj, false);
+               ret = amdgpu_bo_reserve(aobj, true);
                if (likely(ret == 0)) {
                        amdgpu_bo_unpin(aobj);
                        amdgpu_bo_unreserve(aobj);
@@ -2720,7 +2720,7 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
 
                amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
                abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-               r = amdgpu_bo_reserve(abo, false);
+               r = amdgpu_bo_reserve(abo, true);
                if (unlikely(r))
                        DRM_ERROR("failed to reserve abo before unpin\n");
                else {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index e59bc42df18c4209f059d7201e6778e6478b7a78..773654a19749fa7594250c6683d688e50bb12172 100644
@@ -2214,7 +2214,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
        if (!atomic && fb && fb != crtc->primary->fb) {
                amdgpu_fb = to_amdgpu_framebuffer(fb);
                abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-               r = amdgpu_bo_reserve(abo, false);
+               r = amdgpu_bo_reserve(abo, true);
                if (unlikely(r != 0))
                        return r;
                amdgpu_bo_unpin(abo);
@@ -2609,7 +2609,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 unpin:
        if (amdgpu_crtc->cursor_bo) {
                struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-               ret = amdgpu_bo_reserve(aobj, false);
+               ret = amdgpu_bo_reserve(aobj, true);
                if (likely(ret == 0)) {
                        amdgpu_bo_unpin(aobj);
                        amdgpu_bo_unreserve(aobj);
@@ -2740,7 +2740,7 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
 
                amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
                abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-               r = amdgpu_bo_reserve(abo, false);
+               r = amdgpu_bo_reserve(abo, true);
                if (unlikely(r))
                        DRM_ERROR("failed to reserve abo before unpin\n");
                else {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index e146d252aa300caa3853988b5605695cf5669f55..1f3552967ba374c2e5677a742b70e2040f867d65 100644
@@ -1645,7 +1645,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
        if (!atomic && fb && fb != crtc->primary->fb) {
                amdgpu_fb = to_amdgpu_framebuffer(fb);
                abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-               r = amdgpu_bo_reserve(abo, false);
+               r = amdgpu_bo_reserve(abo, true);
                if (unlikely(r != 0))
                        return r;
                amdgpu_bo_unpin(abo);
@@ -1962,7 +1962,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 unpin:
        if (amdgpu_crtc->cursor_bo) {
                struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-               ret = amdgpu_bo_reserve(aobj, false);
+               ret = amdgpu_bo_reserve(aobj, true);
                if (likely(ret == 0)) {
                        amdgpu_bo_unpin(aobj);
                        amdgpu_bo_unreserve(aobj);
@@ -2088,7 +2088,7 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
 
                amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
                abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-               r = amdgpu_bo_reserve(abo, false);
+               r = amdgpu_bo_reserve(abo, true);
                if (unlikely(r))
                        DRM_ERROR("failed to reserve abo before unpin\n");
                else {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 6df7a28e8aaccbd3e47225148252dd3c427744d4..3c558c170e5e685ad58aafa8dc6df301e879f264 100644
@@ -2089,7 +2089,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
        if (!atomic && fb && fb != crtc->primary->fb) {
                amdgpu_fb = to_amdgpu_framebuffer(fb);
                abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-               r = amdgpu_bo_reserve(abo, false);
+               r = amdgpu_bo_reserve(abo, true);
                if (unlikely(r != 0))
                        return r;
                amdgpu_bo_unpin(abo);
@@ -2440,7 +2440,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 unpin:
        if (amdgpu_crtc->cursor_bo) {
                struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-               ret = amdgpu_bo_reserve(aobj, false);
+               ret = amdgpu_bo_reserve(aobj, true);
                if (likely(ret == 0)) {
                        amdgpu_bo_unpin(aobj);
                        amdgpu_bo_unreserve(aobj);
@@ -2571,7 +2571,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
 
                amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
                abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-               r = amdgpu_bo_reserve(abo, false);
+               r = amdgpu_bo_reserve(abo, true);
                if (unlikely(r))
                        DRM_ERROR("failed to reserve abo before unpin\n");
                else {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 81a24b6b484625bfc76916be44e6bd4e9ba53dcf..f1b479b6ac983da35b8e341cf98928bc35dcd700 100644
@@ -248,7 +248,7 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
 
                amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
                abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-               r = amdgpu_bo_reserve(abo, false);
+               r = amdgpu_bo_reserve(abo, true);
                if (unlikely(r))
                        DRM_ERROR("failed to reserve abo before unpin\n");
                else {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 6a429e9272978aab4f3b890f92f0d9fdb2e2f59c..a125f9d44577ed4bb86d01516c7594977def46a7 100644
@@ -2437,7 +2437,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
        int r;
 
        if (adev->gfx.rlc.save_restore_obj) {
-               r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+               r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
                if (unlikely(r != 0))
                        dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
                amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
@@ -2448,7 +2448,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
        }
 
        if (adev->gfx.rlc.clear_state_obj) {
-               r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+               r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
                if (unlikely(r != 0))
                        dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
                amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
@@ -2459,7 +2459,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
        }
 
        if (adev->gfx.rlc.cp_table_obj) {
-               r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+               r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
                if (unlikely(r != 0))
                        dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
                amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index eca022eaff44f5759d5fde6cfc5a599a9d61dd8c..ee2f2139e2eba62b31173052c7b809c152489585 100644
@@ -2792,7 +2792,7 @@ static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
                struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
 
                if (ring->mqd_obj) {
-                       r = amdgpu_bo_reserve(ring->mqd_obj, false);
+                       r = amdgpu_bo_reserve(ring->mqd_obj, true);
                        if (unlikely(r != 0))
                                dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
 
@@ -2810,7 +2810,7 @@ static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
        int r;
 
        if (adev->gfx.mec.hpd_eop_obj) {
-               r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
+               r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
                if (unlikely(r != 0))
                        dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
                amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
@@ -3359,7 +3359,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
 
        /* save restore block */
        if (adev->gfx.rlc.save_restore_obj) {
-               r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+               r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
                if (unlikely(r != 0))
                        dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
                amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
@@ -3371,7 +3371,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
 
        /* clear state block */
        if (adev->gfx.rlc.clear_state_obj) {
-               r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+               r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
                if (unlikely(r != 0))
                        dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
                amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
@@ -3383,7 +3383,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
 
        /* clear state block */
        if (adev->gfx.rlc.cp_table_obj) {
-               r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+               r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
                if (unlikely(r != 0))
                        dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
                amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 0115c5f65c01c7e64a6305ef299eadb7acdc5365..703bd55d6075e939974d596552890307044226b9 100644
@@ -1239,7 +1239,7 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
 
        /* clear state block */
        if (adev->gfx.rlc.clear_state_obj) {
-               r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+               r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
                if (unlikely(r != 0))
                        dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r);
                amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
@@ -1250,7 +1250,7 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
 
        /* jump table block */
        if (adev->gfx.rlc.cp_table_obj) {
-               r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+               r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
                if (unlikely(r != 0))
                        dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
                amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
@@ -1363,7 +1363,7 @@ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
        int r;
 
        if (adev->gfx.mec.hpd_eop_obj) {
-               r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
+               r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
                if (unlikely(r != 0))
                        dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
                amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
@@ -1490,7 +1490,7 @@ static int gfx_v8_0_kiq_init(struct amdgpu_device *adev)
 
        memset(hpd, 0, MEC_HPD_SIZE);
 
-       r = amdgpu_bo_reserve(kiq->eop_obj, false);
+       r = amdgpu_bo_reserve(kiq->eop_obj, true);
        if (unlikely(r != 0))
                dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
        amdgpu_bo_kunmap(kiq->eop_obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 210d21c085f2f5a3051f3c6b5cf506b2e849a8ea..4073fd24dd13cb725caf0106f88a139b3e90905a 100644
@@ -453,7 +453,7 @@ static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
        int r;
 
        if (adev->gfx.mec.hpd_eop_obj) {
-               r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
+               r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
                if (unlikely(r != 0))
                        dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
                amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
@@ -463,7 +463,7 @@ static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
                adev->gfx.mec.hpd_eop_obj = NULL;
        }
        if (adev->gfx.mec.mec_fw_obj) {
-               r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, false);
+               r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, true);
                if (unlikely(r != 0))
                        dev_warn(adev->dev, "(%d) reserve mec firmware bo failed\n", r);
                amdgpu_bo_unpin(adev->gfx.mec.mec_fw_obj);
@@ -599,7 +599,7 @@ static int gfx_v9_0_kiq_init(struct amdgpu_device *adev)
 
        memset(hpd, 0, MEC_HPD_SIZE);
 
-       r = amdgpu_bo_reserve(kiq->eop_obj, false);
+       r = amdgpu_bo_reserve(kiq->eop_obj, true);
        if (unlikely(r != 0))
                dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
        amdgpu_bo_kunmap(kiq->eop_obj);
@@ -1786,7 +1786,7 @@ static void gfx_v9_0_cp_compute_fini(struct amdgpu_device *adev)
                struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
 
                if (ring->mqd_obj) {
-                       r = amdgpu_bo_reserve(ring->mqd_obj, false);
+                       r = amdgpu_bo_reserve(ring->mqd_obj, true);
                        if (unlikely(r != 0))
                                dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);