diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 250c8e80e646bbf800143937b6c96f6e065235ca..8fcc743dfa8675393dd2385147b0a797e41a62a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
  */
 #include <linux/dma-fence-array.h>
 #include <linux/interval_tree_generic.h>
+#include <linux/idr.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
+/*
+ * PASID manager
+ *
+ * PASIDs are global address space identifiers that can be shared
+ * between the GPU, an IOMMU and the driver. VMs on different devices
+ * may use the same PASID if they share the same address
+ * space. Therefore PASIDs are allocated using a global IDA. VMs are
+ * looked up from the PASID per amdgpu_device.
+ */
+static DEFINE_IDA(amdgpu_vm_pasid_ida);
+
+/**
+ * amdgpu_vm_alloc_pasid - Allocate a PASID
+ * @bits: Maximum width of the PASID in bits, must be at least 1
+ *
+ * Allocates a PASID of the given width while keeping smaller PASIDs
+ * available if possible.
+ *
+ * Returns a positive integer on success. Returns %-EINVAL if bits==0.
+ * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
+ * memory allocation failure.
+ */
+int amdgpu_vm_alloc_pasid(unsigned int bits)
+{
+       int pasid = -EINVAL;
+
+       for (bits = min(bits, 31U); bits > 0; bits--) {
+               pasid = ida_simple_get(&amdgpu_vm_pasid_ida,
+                                      1U << (bits - 1), 1U << bits,
+                                      GFP_KERNEL);
+               if (pasid != -ENOSPC)
+                       break;
+       }
+
+       return pasid;
+}
+
+/**
+ * amdgpu_vm_free_pasid - Free a PASID
+ * @pasid: PASID to free
+ */
+void amdgpu_vm_free_pasid(unsigned int pasid)
+{
+       ida_simple_remove(&amdgpu_vm_pasid_ida, pasid);
+}
+
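
As an illustration of how the two PASID helpers pair up (a sketch only: the 16-bit width, AMDGPU_VM_CONTEXT_GFX and the error handling are assumptions, not taken from this patch). The allocator tries the widest range [2^(bits-1), 2^bits) first and only falls back to narrower ranges on -ENOSPC, so small PASIDs stay available for devices with tighter limits:

        int pasid, r;

        pasid = amdgpu_vm_alloc_pasid(16);      /* at most 16 bits wide */
        if (pasid < 0)
                return pasid;

        r = amdgpu_vm_init(adev, vm, AMDGPU_VM_CONTEXT_GFX, pasid);
        if (r) {
                amdgpu_vm_free_pasid(pasid);
                return r;
        }
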
 /*
  * GPUVM
  * GPUVM is similar to the legacy gart on older asics, however
@@ -140,7 +187,7 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
                         struct list_head *validated,
                         struct amdgpu_bo_list_entry *entry)
 {
-       entry->robj = vm->root.bo;
+       entry->robj = vm->root.base.bo;
        entry->priority = 0;
        entry->tv.bo = &entry->robj->tbo;
        entry->tv.shared = true;
@@ -148,54 +195,6 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
        list_add(&entry->tv.head, validated);
 }
 
-/**
- * amdgpu_vm_validate_layer - validate a single page table level
- *
- * @parent: parent page table level
- * @validate: callback to do the validation
- * @param: parameter for the validation callback
- *
- * Validate the page table BOs on command submission if neccessary.
- */
-static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
-                                   int (*validate)(void *, struct amdgpu_bo *),
-                                   void *param, bool use_cpu_for_update)
-{
-       unsigned i;
-       int r;
-
-       if (use_cpu_for_update) {
-               r = amdgpu_bo_kmap(parent->bo, NULL);
-               if (r)
-                       return r;
-       }
-
-       if (!parent->entries)
-               return 0;
-
-       for (i = 0; i <= parent->last_entry_used; ++i) {
-               struct amdgpu_vm_pt *entry = &parent->entries[i];
-
-               if (!entry->bo)
-                       continue;
-
-               r = validate(param, entry->bo);
-               if (r)
-                       return r;
-
-               /*
-                * Recurse into the sub directory. This is harmless because we
-                * have only a maximum of 5 layers.
-                */
-               r = amdgpu_vm_validate_level(entry, validate, param,
-                                            use_cpu_for_update);
-               if (r)
-                       return r;
-       }
-
-       return r;
-}
-
 /**
  * amdgpu_vm_validate_pt_bos - validate the page table BOs
  *
@@ -210,64 +209,70 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                              int (*validate)(void *p, struct amdgpu_bo *bo),
                              void *param)
 {
-       uint64_t num_evictions;
-
-       /* We only need to validate the page tables
-        * if they aren't already valid.
-        */
-       num_evictions = atomic64_read(&adev->num_evictions);
-       if (num_evictions == vm->last_eviction_counter)
-               return 0;
+       struct ttm_bo_global *glob = adev->mman.bdev.glob;
+       int r;
 
-       return amdgpu_vm_validate_level(&vm->root, validate, param,
-                                       vm->use_cpu_for_update);
-}
+       spin_lock(&vm->status_lock);
+       while (!list_empty(&vm->evicted)) {
+               struct amdgpu_vm_bo_base *bo_base;
+               struct amdgpu_bo *bo;
 
-/**
- * amdgpu_vm_move_level_in_lru - move one level of PT BOs to the LRU tail
- *
- * @adev: amdgpu device instance
- * @vm: vm providing the BOs
- *
- * Move the PT BOs to the tail of the LRU.
- */
-static void amdgpu_vm_move_level_in_lru(struct amdgpu_vm_pt *parent)
-{
-       unsigned i;
+               bo_base = list_first_entry(&vm->evicted,
+                                          struct amdgpu_vm_bo_base,
+                                          vm_status);
+               spin_unlock(&vm->status_lock);
 
-       if (!parent->entries)
-               return;
+               bo = bo_base->bo;
+               BUG_ON(!bo);
+               if (bo->parent) {
+                       r = validate(param, bo);
+                       if (r)
+                               return r;
 
-       for (i = 0; i <= parent->last_entry_used; ++i) {
-               struct amdgpu_vm_pt *entry = &parent->entries[i];
+                       spin_lock(&glob->lru_lock);
+                       ttm_bo_move_to_lru_tail(&bo->tbo);
+                       if (bo->shadow)
+                               ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
+                       spin_unlock(&glob->lru_lock);
+               }
 
-               if (!entry->bo)
-                       continue;
+               if (bo->tbo.type == ttm_bo_type_kernel &&
+                   vm->use_cpu_for_update) {
+                       r = amdgpu_bo_kmap(bo, NULL);
+                       if (r)
+                               return r;
+               }
 
-               ttm_bo_move_to_lru_tail(&entry->bo->tbo);
-               amdgpu_vm_move_level_in_lru(entry);
+               spin_lock(&vm->status_lock);
+               if (bo->tbo.type != ttm_bo_type_kernel)
+                       list_move(&bo_base->vm_status, &vm->moved);
+               else
+                       list_move(&bo_base->vm_status, &vm->relocated);
        }
+       spin_unlock(&vm->status_lock);
+
+       return 0;
 }
 
 /**
- * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
+ * amdgpu_vm_ready - check VM is ready for updates
  *
- * @adev: amdgpu device instance
- * @vm: vm providing the BOs
+ * @vm: VM to check
  *
- * Move the PT BOs to the tail of the LRU.
+ * Check if all VM PDs/PTs are ready for updates
  */
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
-                                 struct amdgpu_vm *vm)
+bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 {
-       struct ttm_bo_global *glob = adev->mman.bdev.glob;
+       bool ready;
+
+       spin_lock(&vm->status_lock);
+       ready = list_empty(&vm->evicted);
+       spin_unlock(&vm->status_lock);
 
-       spin_lock(&glob->lru_lock);
-       amdgpu_vm_move_level_in_lru(&vm->root);
-       spin_unlock(&glob->lru_lock);
+       return ready;
 }
 
- /**
+/**
  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
  *
  * @adev: amdgpu_device pointer
@@ -288,6 +293,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
        unsigned pt_idx, from, to;
        int r;
        u64 flags;
+       uint64_t init_value = 0;
 
        if (!parent->entries) {
                unsigned num_entries = amdgpu_vm_num_entries(adev, level);
@@ -321,19 +327,25 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
                flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
                                AMDGPU_GEM_CREATE_SHADOW);
 
+       if (vm->pte_support_ats) {
+               init_value = AMDGPU_PTE_SYSTEM;
+               if (level != adev->vm_manager.num_level - 1)
+                       init_value |= AMDGPU_PDE_PTE;
+       }
+
        /* walk over the address space and allocate the page tables */
        for (pt_idx = from; pt_idx <= to; ++pt_idx) {
-               struct reservation_object *resv = vm->root.bo->tbo.resv;
+               struct reservation_object *resv = vm->root.base.bo->tbo.resv;
                struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
                struct amdgpu_bo *pt;
 
-               if (!entry->bo) {
+               if (!entry->base.bo) {
                        r = amdgpu_bo_create(adev,
                                             amdgpu_vm_bo_size(adev, level),
                                             AMDGPU_GPU_PAGE_SIZE, true,
                                             AMDGPU_GEM_DOMAIN_VRAM,
                                             flags,
-                                            NULL, resv, &pt);
+                                            NULL, resv, init_value, &pt);
                        if (r)
                                return r;
 
@@ -348,11 +360,15 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
                        /* Keep a reference to the root directory to avoid
                        * freeing them up in the wrong order.
                        */
-                       pt->parent = amdgpu_bo_ref(vm->root.bo);
-
-                       entry->bo = pt;
+                       pt->parent = amdgpu_bo_ref(parent->base.bo);
+
+                       entry->base.vm = vm;
+                       entry->base.bo = pt;
+                       list_add_tail(&entry->base.bo_list, &pt->va);
+                       spin_lock(&vm->status_lock);
+                       list_add(&entry->base.vm_status, &vm->relocated);
+                       spin_unlock(&vm->status_lock);
                        entry->addr = 0;
-                       entry->huge_page = false;
                }
 
                if (level < adev->vm_manager.num_level) {
@@ -892,8 +908,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 {
        struct amdgpu_bo_va *bo_va;
 
-       list_for_each_entry(bo_va, &bo->va, bo_list) {
-               if (bo_va->vm == vm) {
+       list_for_each_entry(bo_va, &bo->va, base.bo_list) {
+               if (bo_va->base.vm == vm) {
                        return bo_va;
                }
        }
@@ -1018,7 +1034,7 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        int r;
 
        amdgpu_sync_create(&sync);
-       amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.resv, owner);
+       amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner);
        r = amdgpu_sync_wait(&sync, true);
        amdgpu_sync_free(&sync);
 
@@ -1037,18 +1053,17 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  */
 static int amdgpu_vm_update_level(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm,
-                                 struct amdgpu_vm_pt *parent,
-                                 unsigned level)
+                                 struct amdgpu_vm_pt *parent)
 {
        struct amdgpu_bo *shadow;
        struct amdgpu_ring *ring = NULL;
        uint64_t pd_addr, shadow_addr = 0;
-       uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
        uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
        unsigned count = 0, pt_idx, ndw = 0;
        struct amdgpu_job *job;
        struct amdgpu_pte_update_params params;
        struct dma_fence *fence = NULL;
+       uint32_t incr;
 
        int r;
 
@@ -1057,21 +1072,16 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 
        memset(&params, 0, sizeof(params));
        params.adev = adev;
-       shadow = parent->bo->shadow;
+       shadow = parent->base.bo->shadow;
 
        if (vm->use_cpu_for_update) {
-               pd_addr = (unsigned long)parent->bo->kptr;
+               pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
                r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
                if (unlikely(r))
                        return r;
 
                params.func = amdgpu_vm_cpu_set_ptes;
        } else {
-               if (shadow) {
-                       r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
-                       if (r)
-                               return r;
-               }
                ring = container_of(vm->entity.sched, struct amdgpu_ring,
                                    sched);
 
@@ -1081,7 +1091,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
                /* assume the worst case */
                ndw += parent->last_entry_used * 6;
 
-               pd_addr = amdgpu_bo_gpu_offset(parent->bo);
+               pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
 
                if (shadow) {
                        shadow_addr = amdgpu_bo_gpu_offset(shadow);
@@ -1101,30 +1111,28 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 
        /* walk over the address space and update the directory */
        for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
-               struct amdgpu_bo *bo = parent->entries[pt_idx].bo;
+               struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
+               struct amdgpu_bo *bo = entry->base.bo;
                uint64_t pde, pt;
 
                if (bo == NULL)
                        continue;
 
-               if (bo->shadow) {
-                       struct amdgpu_bo *pt_shadow = bo->shadow;
-
-                       r = amdgpu_ttm_bind(&pt_shadow->tbo,
-                                           &pt_shadow->tbo.mem);
-                       if (r)
-                               return r;
-               }
+               spin_lock(&vm->status_lock);
+               list_del_init(&entry->base.vm_status);
+               spin_unlock(&vm->status_lock);
 
                pt = amdgpu_bo_gpu_offset(bo);
                pt = amdgpu_gart_get_vm_pde(adev, pt);
-               if (parent->entries[pt_idx].addr == pt ||
-                   parent->entries[pt_idx].huge_page)
+               /* Don't update huge pages here */
+               if ((parent->entries[pt_idx].addr & AMDGPU_PDE_PTE) ||
+                   parent->entries[pt_idx].addr == (pt | AMDGPU_PTE_VALID))
                        continue;
 
-               parent->entries[pt_idx].addr = pt;
+               parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID;
 
                pde = pd_addr + pt_idx * 8;
+               incr = amdgpu_bo_size(bo);
                if (((last_pde + 8 * count) != pde) ||
                    ((last_pt + incr * count) != pt) ||
                    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
@@ -1152,7 +1160,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
        }
 
        if (count) {
-               if (vm->root.bo->shadow)
+               if (vm->root.base.bo->shadow)
                        params.func(&params, last_shadow, last_pt,
                                    count, incr, AMDGPU_PTE_VALID);
 
@@ -1165,7 +1173,8 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
                        amdgpu_job_free(job);
                } else {
                        amdgpu_ring_pad_ib(ring, params.ib);
-                       amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
+                       amdgpu_sync_resv(adev, &job->sync,
+                                        parent->base.bo->tbo.resv,
                                         AMDGPU_FENCE_OWNER_VM);
                        if (shadow)
                                amdgpu_sync_resv(adev, &job->sync,
@@ -1178,26 +1187,11 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
                        if (r)
                                goto error_free;
 
-                       amdgpu_bo_fence(parent->bo, fence, true);
-                       dma_fence_put(vm->last_dir_update);
-                       vm->last_dir_update = dma_fence_get(fence);
-                       dma_fence_put(fence);
+                       amdgpu_bo_fence(parent->base.bo, fence, true);
+                       dma_fence_put(vm->last_update);
+                       vm->last_update = fence;
                }
        }
-       /*
-        * Recurse into the subdirectories. This recursion is harmless because
-        * we only have a maximum of 5 layers.
-        */
-       for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
-               struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
-
-               if (!entry->bo)
-                       continue;
-
-               r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
-               if (r)
-                       return r;
-       }
 
        return 0;
 
@@ -1213,7 +1207,8 @@ error_free:
  *
  * Mark all PD level as invalid after an error.
  */
-static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
+static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
+                                      struct amdgpu_vm_pt *parent)
 {
        unsigned pt_idx;
 
@@ -1224,11 +1219,15 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
        for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
                struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
 
-               if (!entry->bo)
+               if (!entry->base.bo)
                        continue;
 
                entry->addr = ~0ULL;
-               amdgpu_vm_invalidate_level(entry);
+               spin_lock(&vm->status_lock);
+               if (list_empty(&entry->base.vm_status))
+                       list_add(&entry->base.vm_status, &vm->relocated);
+               spin_unlock(&vm->status_lock);
+               amdgpu_vm_invalidate_level(vm, entry);
        }
 }
 
@@ -1246,9 +1245,38 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 {
        int r;
 
-       r = amdgpu_vm_update_level(adev, vm, &vm->root, 0);
-       if (r)
-               amdgpu_vm_invalidate_level(&vm->root);
+       spin_lock(&vm->status_lock);
+       while (!list_empty(&vm->relocated)) {
+               struct amdgpu_vm_bo_base *bo_base;
+               struct amdgpu_bo *bo;
+
+               bo_base = list_first_entry(&vm->relocated,
+                                          struct amdgpu_vm_bo_base,
+                                          vm_status);
+               spin_unlock(&vm->status_lock);
+
+               bo = bo_base->bo->parent;
+               if (bo) {
+                       struct amdgpu_vm_bo_base *parent;
+                       struct amdgpu_vm_pt *pt;
+
+                       parent = list_first_entry(&bo->va,
+                                                 struct amdgpu_vm_bo_base,
+                                                 bo_list);
+                       pt = container_of(parent, struct amdgpu_vm_pt, base);
+
+                       r = amdgpu_vm_update_level(adev, vm, pt);
+                       if (r) {
+                               amdgpu_vm_invalidate_level(vm, &vm->root);
+                               return r;
+                       }
+                       spin_lock(&vm->status_lock);
+               } else {
+                       spin_lock(&vm->status_lock);
+                       list_del_init(&bo_base->vm_status);
+               }
+       }
+       spin_unlock(&vm->status_lock);
 
        if (vm->use_cpu_for_update) {
                /* Flush HDP */
@@ -1279,7 +1307,7 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
        *entry = &p->vm->root;
        while ((*entry)->entries) {
                idx = addr >> (p->adev->vm_manager.block_size * level--);
-               idx %= amdgpu_bo_size((*entry)->bo) / 8;
+               idx %= amdgpu_bo_size((*entry)->base.bo) / 8;
                *parent = *entry;
                *entry = &(*entry)->entries[idx];
        }
@@ -1300,55 +1328,62 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
  *
  * Check if we can update the PD with a huge page.
  */
-static int amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
-                                      struct amdgpu_vm_pt *entry,
-                                      struct amdgpu_vm_pt *parent,
-                                      unsigned nptes, uint64_t dst,
-                                      uint64_t flags)
+static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
+                                       struct amdgpu_vm_pt *entry,
+                                       struct amdgpu_vm_pt *parent,
+                                       unsigned nptes, uint64_t dst,
+                                       uint64_t flags)
 {
        bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
        uint64_t pd_addr, pde;
-       int r;
 
        /* In the case of a mixed PT the PDE must point to it*/
        if (p->adev->asic_type < CHIP_VEGA10 ||
            nptes != AMDGPU_VM_PTE_COUNT(p->adev) ||
-           p->func == amdgpu_vm_do_copy_ptes ||
+           p->src ||
            !(flags & AMDGPU_PTE_VALID)) {
 
-               dst = amdgpu_bo_gpu_offset(entry->bo);
+               dst = amdgpu_bo_gpu_offset(entry->base.bo);
                dst = amdgpu_gart_get_vm_pde(p->adev, dst);
                flags = AMDGPU_PTE_VALID;
        } else {
+               /* Set the huge page flag to stop scanning at this PDE */
                flags |= AMDGPU_PDE_PTE;
        }
 
-       if (entry->addr == dst &&
-           entry->huge_page == !!(flags & AMDGPU_PDE_PTE))
-               return 0;
+       if (entry->addr == (dst | flags))
+               return;
 
-       entry->addr = dst;
-       entry->huge_page = !!(flags & AMDGPU_PDE_PTE);
+       entry->addr = (dst | flags);
 
        if (use_cpu_update) {
-               r = amdgpu_bo_kmap(parent->bo, (void *)&pd_addr);
-               if (r)
-                       return r;
+               /* In case a huge page is replaced with a system
+                * memory mapping, p->pages_addr != NULL and
+                * amdgpu_vm_cpu_set_ptes would try to translate dst
+                * through amdgpu_vm_map_gart. But dst is already a
+                * GPU address (of the page table). Disable
+                * amdgpu_vm_map_gart temporarily.
+                */
+               dma_addr_t *tmp;
+
+               tmp = p->pages_addr;
+               p->pages_addr = NULL;
 
+               pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
                pde = pd_addr + (entry - parent->entries) * 8;
                amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
+
+               p->pages_addr = tmp;
        } else {
-               if (parent->bo->shadow) {
-                       pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow);
+               if (parent->base.bo->shadow) {
+                       pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
                        pde = pd_addr + (entry - parent->entries) * 8;
                        amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
                }
-               pd_addr = amdgpu_bo_gpu_offset(parent->bo);
+               pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
                pde = pd_addr + (entry - parent->entries) * 8;
                amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
        }
-
-       return 0;
 }
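
To summarize how entry->addr is reused by this change (a recap of the branches above, not additional driver code):

        /*
         * entry->addr now caches the complete PDE value:
         *   pt_address | AMDGPU_PTE_VALID  -> regular PDE pointing at a page table
         *   dst        | AMDGPU_PDE_PTE    -> PDE acting as a huge-page PTE
         * amdgpu_vm_update_level() and amdgpu_vm_update_ptes() test these bits to
         * skip redundant directory writes and to stop the PTE walk at huge pages.
         */
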
 
 /**
@@ -1375,7 +1410,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
        struct amdgpu_bo *pt;
        unsigned nptes;
        bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes);
-       int r;
 
        /* walk over the address space and update the page tables */
        for (addr = start; addr < end; addr += nptes,
@@ -1391,17 +1425,15 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
                else
                        nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
 
-               r = amdgpu_vm_handle_huge_pages(params, entry, parent,
-                                               nptes, dst, flags);
-               if (r)
-                       return r;
-
-               if (entry->huge_page)
+               amdgpu_vm_handle_huge_pages(params, entry, parent,
+                                           nptes, dst, flags);
+               /* We don't need to update PTEs for huge pages */
+               if (entry->addr & AMDGPU_PDE_PTE)
                        continue;
 
-               pt = entry->bo;
+               pt = entry->base.bo;
                if (use_cpu_update) {
-                       pe_start = (unsigned long)pt->kptr;
+                       pe_start = (unsigned long)amdgpu_bo_kptr(pt);
                } else {
                        if (pt->shadow) {
                                pe_start = amdgpu_bo_gpu_offset(pt->shadow);
@@ -1435,8 +1467,6 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params    *params,
                                uint64_t start, uint64_t end,
                                uint64_t dst, uint64_t flags)
 {
-       int r;
-
        /**
         * The MC L1 TLB supports variable sized pages, based on a fragment
         * field in the PTE. When this field is set to a non-zero value, page
@@ -1455,41 +1485,38 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params  *params,
         * Userspace can support this by aligning virtual base address and
         * allocation size to the fragment size.
         */
-
-       /* SI and newer are optimized for 64KB */
-       unsigned pages_per_frag = AMDGPU_LOG2_PAGES_PER_FRAG(params->adev);
-       uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag);
-       uint64_t frag_align = 1 << pages_per_frag;
-
-       uint64_t frag_start = ALIGN(start, frag_align);
-       uint64_t frag_end = end & ~(frag_align - 1);
+       unsigned max_frag = params->adev->vm_manager.fragment_size;
+       int r;
 
        /* system pages are non-contiguous */
-       if (params->src || !(flags & AMDGPU_PTE_VALID) ||
-           (frag_start >= frag_end))
+       if (params->src || !(flags & AMDGPU_PTE_VALID))
                return amdgpu_vm_update_ptes(params, start, end, dst, flags);
 
-       /* handle the 4K area at the beginning */
-       if (start != frag_start) {
-               r = amdgpu_vm_update_ptes(params, start, frag_start,
-                                         dst, flags);
+       while (start != end) {
+               uint64_t frag_flags, frag_end;
+               unsigned frag;
+
+               /* This intentionally wraps around if no bit is set */
+               frag = min((unsigned)ffs(start) - 1,
+                          (unsigned)fls64(end - start) - 1);
+               if (frag >= max_frag) {
+                       frag_flags = AMDGPU_PTE_FRAG(max_frag);
+                       frag_end = end & ~((1ULL << max_frag) - 1);
+               } else {
+                       frag_flags = AMDGPU_PTE_FRAG(frag);
+                       frag_end = start + (1 << frag);
+               }
+
+               r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
+                                         flags | frag_flags);
                if (r)
                        return r;
-               dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
-       }
 
-       /* handle the area in the middle */
-       r = amdgpu_vm_update_ptes(params, frag_start, frag_end, dst,
-                                 flags | frag_flags);
-       if (r)
-               return r;
-
-       /* handle the 4K area at the end */
-       if (frag_end != end) {
-               dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
-               r = amdgpu_vm_update_ptes(params, frag_end, end, dst, flags);
+               dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
+               start = frag_end;
        }
-       return r;
+
+       return 0;
 }
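
To make the fragment selection above more concrete, here is a small stand-alone sketch (plain user-space C, not driver code) that mimics the per-iteration choice: the largest power-of-two block that is both aligned at start and still fits before end, capped at max_frag. Unlike the driver, which batches everything above max_frag into one update, this steps one fragment at a time:

        #include <stdio.h>

        int main(void)
        {
                unsigned long long start = 5, end = 64; /* GPU page numbers, made up */
                unsigned max_frag = 9;                  /* e.g. 2MB fragments on 4K pages */

                while (start != end) {
                        /* alignment of 'start' (treated as huge when start == 0) */
                        unsigned frag = start ? __builtin_ffsll(start) - 1 : 63;
                        /* floor(log2(end - start)): largest block that still fits */
                        unsigned span = 63 - __builtin_clzll(end - start);

                        if (span < frag)
                                frag = span;
                        if (frag > max_frag)
                                frag = max_frag;

                        printf("start %4llu -> fragment of %llu pages\n",
                               start, 1ULL << frag);
                        start += 1ULL << frag;
                }
                return 0;
        }
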
 
 /**
@@ -1497,7 +1524,6 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params    *params,
  *
  * @adev: amdgpu_device pointer
  * @exclusive: fence we need to sync to
- * @src: address where to copy page table entries from
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @start: start of mapped range
@@ -1511,7 +1537,6 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params    *params,
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                                       struct dma_fence *exclusive,
-                                      uint64_t src,
                                       dma_addr_t *pages_addr,
                                       struct amdgpu_vm *vm,
                                       uint64_t start, uint64_t last,
@@ -1529,7 +1554,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
-       params.src = src;
 
        /* sync to everything on unmapping */
        if (!(flags & AMDGPU_PTE_VALID))
@@ -1558,10 +1582,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        nptes = last - start + 1;
 
        /*
-        * reserve space for one command every (1 << BLOCK_SIZE)
+        * reserve space for two commands every (1 << BLOCK_SIZE)
         *  entries or 2k dwords (whatever is smaller)
+         *
+         * The second command is for the shadow pagetables.
         */
-       ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1;
+       ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
 
        /* padding, etc. */
        ndw = 64;
@@ -1569,15 +1595,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        /* one PDE write for each huge page */
        ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
 
-       if (src) {
-               /* only copy commands needed */
-               ndw += ncmds * 7;
-
-               params.func = amdgpu_vm_do_copy_ptes;
-
-       } else if (pages_addr) {
+       if (pages_addr) {
                /* copy commands needed */
-               ndw += ncmds * 7;
+               ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
 
                /* and also PTEs */
                ndw += nptes * 2;
@@ -1586,10 +1606,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
        } else {
                /* set page commands needed */
-               ndw += ncmds * 10;
+               ndw += ncmds * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
 
-               /* two extra commands for begin/end of fragment */
-               ndw += 2 * 10;
+               /* extra commands for begin/end fragments */
+               ndw += 2 * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw
+                               * adev->vm_manager.fragment_size;
 
                params.func = amdgpu_vm_do_set_ptes;
        }
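
As a rough worked example of the command/dword budget above (the numbers are hypothetical; copy_pte_num_dw and set_pte_pde_num_dw are per-ASIC values):

        /*
         * block_size = 9, nptes = 1 << 18 (1 GB of 4K pages):
         *   ncmds = ((1 << 18 >> 9) + 1) * 2 = 1026   (x2 covers the shadow PTs)
         * GTT mapping (copy commands plus inline PTEs):
         *   ndw = 64 + huge-page PDE writes + ncmds * copy_pte_num_dw + 2 * nptes
         * VRAM mapping (set_pte_pde commands only):
         *   ndw = 64 + huge-page PDE writes + ncmds * set_pte_pde_num_dw
         *         + 2 * set_pte_pde_num_dw * fragment_size
         */
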
@@ -1600,7 +1621,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
        params.ib = &job->ibs[0];
 
-       if (!src && pages_addr) {
+       if (pages_addr) {
                uint64_t *pte;
                unsigned i;
 
@@ -1621,12 +1642,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        if (r)
                goto error_free;
 
-       r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
+       r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
                             owner);
        if (r)
                goto error_free;
 
-       r = reservation_object_reserve_shared(vm->root.bo->tbo.resv);
+       r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
        if (r)
                goto error_free;
 
@@ -1641,14 +1662,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        if (r)
                goto error_free;
 
-       amdgpu_bo_fence(vm->root.bo, f, true);
+       amdgpu_bo_fence(vm->root.base.bo, f, true);
        dma_fence_put(*fence);
        *fence = f;
        return 0;
 
 error_free:
        amdgpu_job_free(job);
-       amdgpu_vm_invalidate_level(&vm->root);
+       amdgpu_vm_invalidate_level(vm, &vm->root);
        return r;
 }
 
@@ -1657,7 +1678,6 @@ error_free:
  *
  * @adev: amdgpu_device pointer
  * @exclusive: fence we need to sync to
- * @gtt_flags: flags as they are used for GTT
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @mapping: mapped range and flags to use for the update
@@ -1671,7 +1691,6 @@ error_free:
  */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                                      struct dma_fence *exclusive,
-                                     uint64_t gtt_flags,
                                      dma_addr_t *pages_addr,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo_va_mapping *mapping,
@@ -1679,7 +1698,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                                      struct drm_mm_node *nodes,
                                      struct dma_fence **fence)
 {
-       uint64_t pfn, src = 0, start = mapping->start;
+       uint64_t pfn, start = mapping->start;
        int r;
 
        /* normally, bo_va->flags only contains READABLE and WRITEABLE bits go here
@@ -1726,11 +1745,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                }
 
                if (pages_addr) {
-                       if (flags == gtt_flags)
-                               src = adev->gart.table_addr +
-                                       (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
-                       else
-                               max_entries = min(max_entries, 16ull * 1024ull);
+                       max_entries = min(max_entries, 16ull * 1024ull);
                        addr = 0;
                } else if (flags & AMDGPU_PTE_VALID) {
                        addr += adev->vm_manager.vram_base_offset;
@@ -1738,8 +1753,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                addr += pfn << PAGE_SHIFT;
 
                last = min((uint64_t)mapping->last, start + max_entries - 1);
-               r = amdgpu_vm_bo_update_mapping(adev, exclusive,
-                                               src, pages_addr, vm,
+               r = amdgpu_vm_bo_update_mapping(adev, exclusive, pages_addr, vm,
                                                start, last, flags, addr,
                                                fence);
                if (r)
@@ -1771,75 +1785,75 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
                        bool clear)
 {
-       struct amdgpu_vm *vm = bo_va->vm;
+       struct amdgpu_bo *bo = bo_va->base.bo;
+       struct amdgpu_vm *vm = bo_va->base.vm;
        struct amdgpu_bo_va_mapping *mapping;
        dma_addr_t *pages_addr = NULL;
-       uint64_t gtt_flags, flags;
        struct ttm_mem_reg *mem;
        struct drm_mm_node *nodes;
-       struct dma_fence *exclusive;
+       struct dma_fence *exclusive, **last_update;
+       uint64_t flags;
        int r;
 
-       if (clear || !bo_va->bo) {
+       if (clear || !bo_va->base.bo) {
                mem = NULL;
                nodes = NULL;
                exclusive = NULL;
        } else {
                struct ttm_dma_tt *ttm;
 
-               mem = &bo_va->bo->tbo.mem;
+               mem = &bo_va->base.bo->tbo.mem;
                nodes = mem->mm_node;
                if (mem->mem_type == TTM_PL_TT) {
-                       ttm = container_of(bo_va->bo->tbo.ttm, struct
-                                          ttm_dma_tt, ttm);
+                       ttm = container_of(bo_va->base.bo->tbo.ttm,
+                                          struct ttm_dma_tt, ttm);
                        pages_addr = ttm->dma_address;
                }
-               exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
+               exclusive = reservation_object_get_excl(bo->tbo.resv);
        }
 
-       if (bo_va->bo) {
-               flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
-               gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
-                       adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
-                       flags : 0;
-       } else {
+       if (bo)
+               flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
+       else
                flags = 0x0;
-               gtt_flags = ~0x0;
-       }
 
-       spin_lock(&vm->status_lock);
-       if (!list_empty(&bo_va->vm_status))
+       if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
+               last_update = &vm->last_update;
+       else
+               last_update = &bo_va->last_pt_update;
+
+       if (!clear && bo_va->base.moved) {
+               bo_va->base.moved = false;
                list_splice_init(&bo_va->valids, &bo_va->invalids);
-       spin_unlock(&vm->status_lock);
+
+       } else if (bo_va->cleared != clear) {
+               list_splice_init(&bo_va->valids, &bo_va->invalids);
+       }
 
        list_for_each_entry(mapping, &bo_va->invalids, list) {
-               r = amdgpu_vm_bo_split_mapping(adev, exclusive,
-                                              gtt_flags, pages_addr, vm,
+               r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
                                               mapping, flags, nodes,
-                                              &bo_va->last_pt_update);
+                                              last_update);
                if (r)
                        return r;
        }
 
-       if (trace_amdgpu_vm_bo_mapping_enabled()) {
-               list_for_each_entry(mapping, &bo_va->valids, list)
-                       trace_amdgpu_vm_bo_mapping(mapping);
-
-               list_for_each_entry(mapping, &bo_va->invalids, list)
-                       trace_amdgpu_vm_bo_mapping(mapping);
+       if (vm->use_cpu_for_update) {
+               /* Flush HDP */
+               mb();
+               amdgpu_gart_flush_gpu_tlb(adev, 0);
        }
 
        spin_lock(&vm->status_lock);
-       list_splice_init(&bo_va->invalids, &bo_va->valids);
-       list_del_init(&bo_va->vm_status);
-       if (clear)
-               list_add(&bo_va->vm_status, &vm->cleared);
+       list_del_init(&bo_va->base.vm_status);
        spin_unlock(&vm->status_lock);
 
-       if (vm->use_cpu_for_update) {
-               /* Flush HDP */
-               mb();
-               amdgpu_gart_flush_gpu_tlb(adev, 0);
+       list_splice_init(&bo_va->invalids, &bo_va->valids);
+       bo_va->cleared = clear;
+
+       if (trace_amdgpu_vm_bo_mapping_enabled()) {
+               list_for_each_entry(mapping, &bo_va->valids, list)
+                       trace_amdgpu_vm_bo_mapping(mapping);
        }
 
        return 0;
@@ -1947,7 +1961,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
  */
 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
-       struct reservation_object *resv = vm->root.bo->tbo.resv;
+       struct reservation_object *resv = vm->root.base.bo->tbo.resv;
        struct dma_fence *excl, **shared;
        unsigned i, shared_count;
        int r;
@@ -1995,15 +2009,19 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
        struct amdgpu_bo_va_mapping *mapping;
        struct dma_fence *f = NULL;
        int r;
+       uint64_t init_pte_value = 0;
 
        while (!list_empty(&vm->freed)) {
                mapping = list_first_entry(&vm->freed,
                        struct amdgpu_bo_va_mapping, list);
                list_del(&mapping->list);
 
-               r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
+               if (vm->pte_support_ats)
+                       init_pte_value = AMDGPU_PTE_SYSTEM;
+
+               r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
                                                mapping->start, mapping->last,
-                                               0, 0, &f);
+                                               init_pte_value, 0, &f);
                amdgpu_vm_free_mapping(adev, vm, mapping, f);
                if (r) {
                        dma_fence_put(f);
@@ -2023,29 +2041,35 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
+ * amdgpu_vm_handle_moved - handle moved BOs in the PT
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
  *
- * Make sure all invalidated BOs are cleared in the PT.
+ * Make sure all BOs which are moved are updated in the PTs.
  * Returns 0 for success.
  *
- * PTs have to be reserved and mutex must be locked!
+ * PTs have to be reserved!
  */
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
-                            struct amdgpu_vm *vm, struct amdgpu_sync *sync)
+int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
+                          struct amdgpu_vm *vm)
 {
-       struct amdgpu_bo_va *bo_va = NULL;
+       bool clear;
        int r = 0;
 
        spin_lock(&vm->status_lock);
-       while (!list_empty(&vm->invalidated)) {
-               bo_va = list_first_entry(&vm->invalidated,
-                       struct amdgpu_bo_va, vm_status);
+       while (!list_empty(&vm->moved)) {
+               struct amdgpu_bo_va *bo_va;
+
+               bo_va = list_first_entry(&vm->moved,
+                       struct amdgpu_bo_va, base.vm_status);
                spin_unlock(&vm->status_lock);
 
-               r = amdgpu_vm_bo_update(adev, bo_va, true);
+               /* Per VM BOs never need to be cleared in the page tables */
+               clear = bo_va->base.bo->tbo.resv != vm->root.base.bo->tbo.resv;
+
+               r = amdgpu_vm_bo_update(adev, bo_va, clear);
                if (r)
                        return r;
 
@@ -2053,9 +2077,6 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
        }
        spin_unlock(&vm->status_lock);
 
-       if (bo_va)
-               r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
-
        return r;
 }
 
@@ -2082,20 +2103,54 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
        if (bo_va == NULL) {
                return NULL;
        }
-       bo_va->vm = vm;
-       bo_va->bo = bo;
+       bo_va->base.vm = vm;
+       bo_va->base.bo = bo;
+       INIT_LIST_HEAD(&bo_va->base.bo_list);
+       INIT_LIST_HEAD(&bo_va->base.vm_status);
+
        bo_va->ref_count = 1;
-       INIT_LIST_HEAD(&bo_va->bo_list);
        INIT_LIST_HEAD(&bo_va->valids);
        INIT_LIST_HEAD(&bo_va->invalids);
-       INIT_LIST_HEAD(&bo_va->vm_status);
 
        if (bo)
-               list_add_tail(&bo_va->bo_list, &bo->va);
+               list_add_tail(&bo_va->base.bo_list, &bo->va);
 
        return bo_va;
 }
 
+
+/**
+ * amdgpu_vm_bo_insert_map - insert a new mapping
+ *
+ * @adev: amdgpu_device pointer
+ * @bo_va: bo_va to store the address
+ * @mapping: the mapping to insert
+ *
+ * Insert a new mapping into all structures.
+ */
+static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
+                                   struct amdgpu_bo_va *bo_va,
+                                   struct amdgpu_bo_va_mapping *mapping)
+{
+       struct amdgpu_vm *vm = bo_va->base.vm;
+       struct amdgpu_bo *bo = bo_va->base.bo;
+
+       mapping->bo_va = bo_va;
+       list_add(&mapping->list, &bo_va->invalids);
+       amdgpu_vm_it_insert(mapping, &vm->va);
+
+       if (mapping->flags & AMDGPU_PTE_PRT)
+               amdgpu_vm_prt_get(adev);
+
+       if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+               spin_lock(&vm->status_lock);
+               if (list_empty(&bo_va->base.vm_status))
+                       list_add(&bo_va->base.vm_status, &vm->moved);
+               spin_unlock(&vm->status_lock);
+       }
+       trace_amdgpu_vm_bo_map(bo_va, mapping);
+}
+
 /**
  * amdgpu_vm_bo_map - map bo inside a vm
  *
@@ -2116,7 +2171,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                     uint64_t size, uint64_t flags)
 {
        struct amdgpu_bo_va_mapping *mapping, *tmp;
-       struct amdgpu_vm *vm = bo_va->vm;
+       struct amdgpu_bo *bo = bo_va->base.bo;
+       struct amdgpu_vm *vm = bo_va->base.vm;
        uint64_t eaddr;
 
        /* validate the parameters */
@@ -2127,7 +2183,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        /* make sure object fit at this offset */
        eaddr = saddr + size - 1;
        if (saddr >= eaddr ||
-           (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+           (bo && offset + size > amdgpu_bo_size(bo)))
                return -EINVAL;
 
        saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2137,7 +2193,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        if (tmp) {
                /* bo and tmp overlap, invalid addr */
                dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
-                       "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
+                       "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
                        tmp->start, tmp->last + 1);
                return -EINVAL;
        }
@@ -2146,17 +2202,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        if (!mapping)
                return -ENOMEM;
 
-       INIT_LIST_HEAD(&mapping->list);
        mapping->start = saddr;
        mapping->last = eaddr;
        mapping->offset = offset;
        mapping->flags = flags;
 
-       list_add(&mapping->list, &bo_va->invalids);
-       amdgpu_vm_it_insert(mapping, &vm->va);
-
-       if (flags & AMDGPU_PTE_PRT)
-               amdgpu_vm_prt_get(adev);
+       amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
 
        return 0;
 }
@@ -2182,7 +2233,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
                             uint64_t size, uint64_t flags)
 {
        struct amdgpu_bo_va_mapping *mapping;
-       struct amdgpu_vm *vm = bo_va->vm;
+       struct amdgpu_bo *bo = bo_va->base.bo;
        uint64_t eaddr;
        int r;
 
@@ -2194,7 +2245,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
        /* make sure object fit at this offset */
        eaddr = saddr + size - 1;
        if (saddr >= eaddr ||
-           (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+           (bo && offset + size > amdgpu_bo_size(bo)))
                return -EINVAL;
 
        /* Allocate all the needed memory */
@@ -2202,7 +2253,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
        if (!mapping)
                return -ENOMEM;
 
-       r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
+       r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
        if (r) {
                kfree(mapping);
                return r;
@@ -2216,11 +2267,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
        mapping->offset = offset;
        mapping->flags = flags;
 
-       list_add(&mapping->list, &bo_va->invalids);
-       amdgpu_vm_it_insert(mapping, &vm->va);
-
-       if (flags & AMDGPU_PTE_PRT)
-               amdgpu_vm_prt_get(adev);
+       amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
 
        return 0;
 }
@@ -2242,7 +2289,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                       uint64_t saddr)
 {
        struct amdgpu_bo_va_mapping *mapping;
-       struct amdgpu_vm *vm = bo_va->vm;
+       struct amdgpu_vm *vm = bo_va->base.vm;
        bool valid = true;
 
        saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2266,6 +2313,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 
        list_del(&mapping->list);
        amdgpu_vm_it_remove(mapping, &vm->va);
+       mapping->bo_va = NULL;
        trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
        if (valid)
@@ -2351,6 +2399,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
                if (tmp->last > eaddr)
                    tmp->last = eaddr;
 
+               tmp->bo_va = NULL;
                list_add(&tmp->list, &vm->freed);
                trace_amdgpu_vm_bo_unmap(NULL, tmp);
        }
@@ -2376,6 +2425,19 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
        return 0;
 }
 
+/**
+ * amdgpu_vm_bo_lookup_mapping - find mapping by address
+ *
+ * @vm: the requested VM
+ * @addr: the address to look the mapping up for
+ *
+ * Find a mapping by its address.
+ */
+struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
+                                                        uint64_t addr)
+{
+       return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
+}
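
A minimal, hypothetical use of the new helper, e.g. when resolving a faulting GPU address back to the BO that backs it (the interval tree is keyed in GPU page units, so a byte address has to be shifted first):

        struct amdgpu_bo_va_mapping *mapping;
        struct amdgpu_bo *bo = NULL;

        mapping = amdgpu_vm_bo_lookup_mapping(vm, addr >> AMDGPU_GPU_PAGE_SHIFT);
        if (mapping && mapping->bo_va)
                bo = mapping->bo_va->base.bo;
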
+
 /**
  * amdgpu_vm_bo_rmv - remove a bo to a specific vm
  *
@@ -2390,17 +2452,18 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va)
 {
        struct amdgpu_bo_va_mapping *mapping, *next;
-       struct amdgpu_vm *vm = bo_va->vm;
+       struct amdgpu_vm *vm = bo_va->base.vm;
 
-       list_del(&bo_va->bo_list);
+       list_del(&bo_va->base.bo_list);
 
        spin_lock(&vm->status_lock);
-       list_del(&bo_va->vm_status);
+       list_del(&bo_va->base.vm_status);
        spin_unlock(&vm->status_lock);
 
        list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
                list_del(&mapping->list);
                amdgpu_vm_it_remove(mapping, &vm->va);
+               mapping->bo_va = NULL;
                trace_amdgpu_vm_bo_unmap(bo_va, mapping);
                list_add(&mapping->list, &vm->freed);
        }
@@ -2425,15 +2488,37 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
  * Mark @bo as invalid.
  */
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
-                            struct amdgpu_bo *bo)
+                            struct amdgpu_bo *bo, bool evicted)
 {
-       struct amdgpu_bo_va *bo_va;
+       struct amdgpu_vm_bo_base *bo_base;
+
+       list_for_each_entry(bo_base, &bo->va, bo_list) {
+               struct amdgpu_vm *vm = bo_base->vm;
+
+               bo_base->moved = true;
+               if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+                       spin_lock(&bo_base->vm->status_lock);
+                       if (bo->tbo.type == ttm_bo_type_kernel)
+                               list_move(&bo_base->vm_status, &vm->evicted);
+                       else
+                               list_move_tail(&bo_base->vm_status,
+                                              &vm->evicted);
+                       spin_unlock(&bo_base->vm->status_lock);
+                       continue;
+               }
+
+               if (bo->tbo.type == ttm_bo_type_kernel) {
+                       spin_lock(&bo_base->vm->status_lock);
+                       if (list_empty(&bo_base->vm_status))
+                               list_add(&bo_base->vm_status, &vm->relocated);
+                       spin_unlock(&bo_base->vm->status_lock);
+                       continue;
+               }
 
-       list_for_each_entry(bo_va, &bo->va, bo_list) {
-               spin_lock(&bo_va->vm->status_lock);
-               if (list_empty(&bo_va->vm_status))
-                       list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
-               spin_unlock(&bo_va->vm->status_lock);
+               spin_lock(&bo_base->vm->status_lock);
+               if (list_empty(&bo_base->vm_status))
+                       list_add(&bo_base->vm_status, &vm->moved);
+               spin_unlock(&bo_base->vm->status_lock);
        }
 }
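
For orientation, the three per-VM state lists a BO can end up on (a summary of the branches above, not extra driver code):

        /*
         * vm->evicted:   per-VM BOs and page tables whose backing store was
         *                evicted; revalidated in amdgpu_vm_validate_pt_bos().
         * vm->relocated: page table BOs that moved; their parent PDEs are
         *                rewritten by amdgpu_vm_update_directories().
         * vm->moved:     regular BOs that moved; their PTEs are rewritten by
         *                amdgpu_vm_handle_moved() / amdgpu_vm_bo_update().
         */
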
 
@@ -2451,12 +2536,26 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
 }
 
 /**
- * amdgpu_vm_adjust_size - adjust vm size and block size
+ * amdgpu_vm_set_fragment_size - adjust fragment size in PTE
+ *
+ * @adev: amdgpu_device pointer
+ * @fragment_size_default: the default fragment size if it's set auto
+ */
+void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_size_default)
+{
+       if (amdgpu_vm_fragment_size == -1)
+               adev->vm_manager.fragment_size = fragment_size_default;
+       else
+               adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
+}
+
+/**
+ * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
  *
  * @adev: amdgpu_device pointer
  * @vm_size: the default vm size if it's set auto
  */
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_t fragment_size_default)
 {
        /* adjust vm size firstly */
        if (amdgpu_vm_size == -1)
@@ -2471,8 +2570,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
        else
                adev->vm_manager.block_size = amdgpu_vm_block_size;
 
-       DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
-               adev->vm_manager.vm_size, adev->vm_manager.block_size);
+       amdgpu_vm_set_fragment_size(adev, fragment_size_default);
+
+       DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n",
+               adev->vm_manager.vm_size, adev->vm_manager.block_size,
+               adev->vm_manager.fragment_size);
 }
 
 /**
@@ -2485,7 +2587,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
  * Init @vm fields.
  */
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                  int vm_context)
+                  int vm_context, unsigned int pasid)
 {
        const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
                AMDGPU_VM_PTE_COUNT(adev) * 8);
@@ -2494,14 +2596,16 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        struct amd_sched_rq *rq;
        int r, i;
        u64 flags;
+       uint64_t init_pde_value = 0;
 
        vm->va = RB_ROOT;
        vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
        for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
                vm->reserved_vmid[i] = NULL;
        spin_lock_init(&vm->status_lock);
-       INIT_LIST_HEAD(&vm->invalidated);
-       INIT_LIST_HEAD(&vm->cleared);
+       INIT_LIST_HEAD(&vm->evicted);
+       INIT_LIST_HEAD(&vm->relocated);
+       INIT_LIST_HEAD(&vm->moved);
        INIT_LIST_HEAD(&vm->freed);
 
        /* create scheduler entity for page table updates */
@@ -2515,17 +2619,24 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        if (r)
                return r;
 
-       if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
+       vm->pte_support_ats = false;
+
+       if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
                vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
                                                AMDGPU_VM_USE_CPU_FOR_COMPUTE);
-       else
+
+               if (adev->asic_type == CHIP_RAVEN) {
+                       vm->pte_support_ats = true;
+                       init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE;
+               }
+       } else
                vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
                                                AMDGPU_VM_USE_CPU_FOR_GFX);
        DRM_DEBUG_DRIVER("VM update mode is %s\n",
                         vm->use_cpu_for_update ? "CPU" : "SDMA");
        WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
                  "CPU update of VM recommended only for large BAR system\n");
-       vm->last_dir_update = NULL;
+       vm->last_update = NULL;
 
        flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
                        AMDGPU_GEM_CREATE_VRAM_CLEARED;
@@ -2538,30 +2649,46 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
                             AMDGPU_GEM_DOMAIN_VRAM,
                             flags,
-                            NULL, NULL, &vm->root.bo);
+                            NULL, NULL, init_pde_value, &vm->root.base.bo);
        if (r)
                goto error_free_sched_entity;
 
-       r = amdgpu_bo_reserve(vm->root.bo, false);
-       if (r)
-               goto error_free_root;
-
-       vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
+       vm->root.base.vm = vm;
+       list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
+       INIT_LIST_HEAD(&vm->root.base.vm_status);
 
        if (vm->use_cpu_for_update) {
-               r = amdgpu_bo_kmap(vm->root.bo, NULL);
+               r = amdgpu_bo_reserve(vm->root.base.bo, false);
+               if (r)
+                       goto error_free_root;
+
+               r = amdgpu_bo_kmap(vm->root.base.bo, NULL);
+               amdgpu_bo_unreserve(vm->root.base.bo);
                if (r)
                        goto error_free_root;
        }
 
-       amdgpu_bo_unreserve(vm->root.bo);
+       if (pasid) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+               r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
+                             GFP_ATOMIC);
+               spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+               if (r < 0)
+                       goto error_free_root;
+
+               vm->pasid = pasid;
+       }
+
+       INIT_KFIFO(vm->faults);
 
        return 0;
 
 error_free_root:
-       amdgpu_bo_unref(&vm->root.bo->shadow);
-       amdgpu_bo_unref(&vm->root.bo);
-       vm->root.bo = NULL;
+       amdgpu_bo_unref(&vm->root.base.bo->shadow);
+       amdgpu_bo_unref(&vm->root.base.bo);
+       vm->root.base.bo = NULL;
 
 error_free_sched_entity:
        amd_sched_entity_fini(&ring->sched, &vm->entity);
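
Once a PASID has been registered in the idr above, the VM can later be recovered from a PASID reported by the hardware. A hypothetical lookup (not part of this patch) would take the same lock:

        struct amdgpu_vm *vm;
        unsigned long flags;

        spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
        vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
        spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
        if (!vm)
                return; /* stale fault, the VM is already gone */
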
@@ -2580,9 +2707,11 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
 {
        unsigned i;
 
-       if (level->bo) {
-               amdgpu_bo_unref(&level->bo->shadow);
-               amdgpu_bo_unref(&level->bo);
+       if (level->base.bo) {
+               list_del(&level->base.bo_list);
+               list_del(&level->base.vm_status);
+               amdgpu_bo_unref(&level->base.bo->shadow);
+               amdgpu_bo_unref(&level->base.bo);
        }
 
        if (level->entries)
@@ -2605,8 +2734,21 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
        struct amdgpu_bo_va_mapping *mapping, *tmp;
        bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
+       u64 fault;
        int i;
 
+       /* Clear pending page faults from IH when the VM is destroyed */
+       while (kfifo_get(&vm->faults, &fault))
+               amdgpu_ih_clear_fault(adev, fault);
+
+       if (vm->pasid) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+               idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
+               spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+       }
+
        amd_sched_entity_fini(vm->entity.sched, &vm->entity);
 
        if (!RB_EMPTY_ROOT(&vm->va)) {
@@ -2628,7 +2770,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        }
 
        amdgpu_vm_free_levels(&vm->root);
-       dma_fence_put(vm->last_dir_update);
+       dma_fence_put(vm->last_update);
        for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
                amdgpu_vm_free_reserved_vmid(adev, vm, i);
 }
@@ -2686,6 +2828,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
        adev->vm_manager.vm_update_mode = 0;
 #endif
 
+       idr_init(&adev->vm_manager.pasid_idr);
+       spin_lock_init(&adev->vm_manager.pasid_lock);
 }
 
 /**
@@ -2699,6 +2843,9 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 {
        unsigned i, j;
 
+       WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
+       idr_destroy(&adev->vm_manager.pasid_idr);
+
        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vm_id_manager *id_mgr =
                        &adev->vm_manager.id_mgr[i];