 				      struct amdgpu_vm_manager_id,
 				      list);
+	if (id->mgr_id->active && !fence_is_signaled(id->mgr_id->active)) {
+		struct amdgpu_vm_manager_id *mgr_id, *tmp;
+		struct list_head *head = &adev->vm_manager.ids_lru;
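+
+		/* The LRU head is still in use; move every ID whose active
+		 * fence has already signaled to the front of the LRU,
+		 * preserving their relative order.
+		 */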
+		list_for_each_entry_safe(mgr_id, tmp,
+					 &adev->vm_manager.ids_lru, list) {
+			if (mgr_id->active && fence_is_signaled(mgr_id->active)) {
+				list_move(&mgr_id->list, head);
+				head = &mgr_id->list;
+			}
+		}
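+
+		/* Pick again; the new head is an idle ID if one exists,
+		 * otherwise we fall through and sync to its active fence.
+		 */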
+		id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
+					      struct amdgpu_vm_manager_id,
+					      list);
+	}
+
 	r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active);
 	if (!r) {
 		fence_put(id->mgr_id->active);