drm/xe: add lockdep annotation for xe_device_mem_access_put()
author    Matthew Auld <matthew.auld@intel.com>
Mon, 24 Jul 2023 10:47:44 +0000 (11:47 +0100)
committer Rodrigo Vivi <rodrigo.vivi@intel.com>
Thu, 21 Dec 2023 16:37:53 +0000 (11:37 -0500)
The main motivation is d3cold, which will make the suspend and resume
callbacks even more scary, but the annotation is useful regardless. We
already have the needed annotation on the acquire side with
xe_device_mem_access_get(), and by adding the matching annotation on
the release side we should have a lot more confidence that our locking
hierarchy is correct.

v2:
  - Move the annotation into both callbacks for better symmetry. Also
    don't hold the map over the entire mem_access_get(); we only need
    lockdep to understand what is being held upon entering
    mem_access_get(), and how that matches up with the locks in the
    callbacks. A minimal sketch of the resulting pattern follows below.
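
A minimal sketch of the acquire-side pattern, mirroring what the
patched xe_device_mem_access_get() does (the names here are invented
for illustration and are not part of the patch): a dummy lockdep_map
is acquired and immediately released, so lockdep records which real
locks are held on entry without the map staying held across the call.

  #include <linux/lockdep.h>

  #ifdef CONFIG_LOCKDEP
  static struct lockdep_map example_lockdep_map = {
          .name = "example_lockdep_map"
  };
  #endif

  static void example_get(void)
  {
          /* Tell lockdep which locks are held at this point ... */
          lock_map_acquire(&example_lockdep_map);
          /* ... then drop the map again; only the ordering matters. */
          lock_map_release(&example_lockdep_map);

          /* the real work, e.g. waking the device, goes here */
  }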

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Anshuman Gupta <anshuman.gupta@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_device.c
drivers/gpu/drm/xe/xe_device.h
drivers/gpu/drm/xe/xe_pm.c

diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index b1f36c986f0d0ea5c4c5bb0566065e51830735ef..f948a358f53e5ed94f56665c39483aa891ba3241 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -35,7 +35,7 @@
 #include "xe_wait_user_fence.h"
 
 #ifdef CONFIG_LOCKDEP
-static struct lockdep_map xe_device_mem_access_lockdep_map = {
+struct lockdep_map xe_device_mem_access_lockdep_map = {
        .name = "xe_device_mem_access_lockdep_map"
 };
 #endif
@@ -431,13 +431,13 @@ void xe_device_mem_access_get(struct xe_device *xe)
         * runtime_resume callback, lockdep should give us a nice splat.
         */
        lock_map_acquire(&xe_device_mem_access_lockdep_map);
+       lock_map_release(&xe_device_mem_access_lockdep_map);
 
        xe_pm_runtime_get(xe);
        ref = atomic_inc_return(&xe->mem_access.ref);
 
        XE_WARN_ON(ref == S32_MAX);
 
-       lock_map_release(&xe_device_mem_access_lockdep_map);
 }
 
 void xe_device_mem_access_put(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 8b085ffdc5f8df6d243c67cf69e11d019ba25213..593accb68281f3814d3ef0ff58758e69a46a056b 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -16,6 +16,10 @@ struct xe_file;
 #include "xe_force_wake.h"
 #include "xe_macros.h"
 
+#ifdef CONFIG_LOCKDEP
+extern struct lockdep_map xe_device_mem_access_lockdep_map;
+#endif
+
 static inline struct xe_device *to_xe_device(const struct drm_device *dev)
 {
        return container_of(dev, struct xe_device, drm);
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 04b995aa848f46f9824f57fc678859f795ed2e30..cb2a00ea28e303f7d0fe07ab657647b1f01ce423 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -188,6 +188,29 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
        /* Disable access_ongoing asserts and prevent recursive pm calls */
        xe_pm_write_callback_task(xe, current);
 
+       /*
+        * The actual xe_device_mem_access_put() is always async underneath, so
+        * exactly where that is called should make no difference to us. However,
+        * we still need to be very careful with the locks that this callback
+        * acquires and the locks that are acquired and held by any callers of
+        * xe_device_mem_access_get(). We already have the matching annotation
+        * on that side, but we also need it here. For example lockdep should be
+        * able to tell us if the following scenario is in theory possible:
+        *
+        * CPU0                          | CPU1 (kworker)
+        * lock(A)                       |
+        *                               | xe_pm_runtime_suspend()
+        *                               |      lock(A)
+        * xe_device_mem_access_get()    |
+        *
+        * This will clearly deadlock since rpm core needs to wait for
+        * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
+        * on CPU0 which prevents CPU1 making forward progress.  With the
+        * annotation here and in xe_device_mem_access_get() lockdep will see
+        * the potential lock inversion and give us a nice splat.
+        */
+       lock_map_acquire(&xe_device_mem_access_lockdep_map);
+
        if (xe->d3cold.allowed) {
                err = xe_bo_evict_all(xe);
                if (err)
@@ -202,6 +225,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
 
        xe_irq_suspend(xe);
 out:
+       lock_map_release(&xe_device_mem_access_lockdep_map);
        xe_pm_write_callback_task(xe, NULL);
        return err;
 }
@@ -215,6 +239,8 @@ int xe_pm_runtime_resume(struct xe_device *xe)
        /* Disable access_ongoing asserts and prevent recursive pm calls */
        xe_pm_write_callback_task(xe, current);
 
+       lock_map_acquire(&xe_device_mem_access_lockdep_map);
+
        /*
         * It can be possible that xe has allowed d3cold but other pcie devices
         * in gfx card soc would have blocked d3cold, therefore card has not
@@ -250,6 +276,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
                        goto out;
        }
 out:
+       lock_map_release(&xe_device_mem_access_lockdep_map);
        xe_pm_write_callback_task(xe, NULL);
        return err;
 }
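
For reference, a hypothetical caller/callback pair showing the lock
inversion the paired annotations let lockdep report before a real
deadlock ever happens (lock "a" and both functions below are invented
for this sketch):

  #include <linux/mutex.h>

  #include "xe_device.h"

  static DEFINE_MUTEX(a);        /* hypothetical driver lock */

  static void some_caller(struct xe_device *xe)
  {
          mutex_lock(&a);
          /* lockdep: mem_access map acquired while holding lock(A) */
          xe_device_mem_access_get(xe);
          xe_device_mem_access_put(xe);
          mutex_unlock(&a);
  }

  /* imagine this running inside the annotated section of
   * xe_pm_runtime_suspend(), i.e. while the map is held */
  static void suspend_side_work(void)
  {
          /* lockdep: lock(A) acquired while holding the map -> splat */
          mutex_lock(&a);
          mutex_unlock(&a);
  }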