drm/nouveau/mmu: implement page table sub-allocation
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 455da298227f65c2b4c2cfc6a2cedebe12661877..1bdae020057e2c47cd9007e14a6636526a859239 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
 #include <core/gpuobj.h>
 #include <subdev/fb.h>
 
+struct nvkm_mmu_ptp {
+       struct nvkm_mmu_pt *pt;
+       struct list_head head;
+       u8  shift;
+       u16 mask;
+       u16 free;
+};
+
+static void
+nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
+{
+       const int slot = pt->base >> pt->ptp->shift;
+       struct nvkm_mmu_ptp *ptp = pt->ptp;
+
+       /* If there were no free slots in the parent allocation before,
+        * there will be now, so return PTP to the cache.
+        */
+       if (!ptp->free)
+               list_add(&ptp->head, &mmu->ptp.list);
+       ptp->free |= BIT(slot);
+
+       /* If there are no more sub-allocations, destroy PTP. */
+       if (ptp->free == ptp->mask) {
+               nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
+               list_del(&ptp->head);
+               kfree(ptp);
+       }
+
+       kfree(pt);
+}
+
+struct nvkm_mmu_pt *
+nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
+{
+       struct nvkm_mmu_pt *pt;
+       struct nvkm_mmu_ptp *ptp;
+       int slot;
+
+       if (!(pt = kzalloc(sizeof(*pt), GFP_KERNEL)))
+               return NULL;
+
+       ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
+       if (!ptp) {
+               /* Need to allocate a new parent to sub-allocate from. */
+               if (!(ptp = kmalloc(sizeof(*ptp), GFP_KERNEL))) {
+                       kfree(pt);
+                       return NULL;
+               }
+
+               ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
+               if (!ptp->pt) {
+                       kfree(ptp);
+                       kfree(pt);
+                       return NULL;
+               }
+
+               ptp->shift = order_base_2(size);
+               slot = nvkm_memory_size(ptp->pt->memory) >> ptp->shift;
+               ptp->mask = (1 << slot) - 1;
+               ptp->free = ptp->mask;
+               list_add(&ptp->head, &mmu->ptp.list);
+       }
+       pt->ptp = ptp;
+       pt->sub = true;
+
+       /* Sub-allocate from the parent object, removing the PTP from
+        * the cache if there are no free slots left.
+        */
+       slot = __ffs(ptp->free);
+       ptp->free &= ~BIT(slot);
+       if (!ptp->free)
+               list_del(&ptp->head);
+
+       pt->memory = pt->ptp->pt->memory;
+       pt->base = slot << ptp->shift;
+       pt->addr = pt->ptp->pt->addr + pt->base;
+       return pt;
+}
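
The bookkeeping above is a plain bitmap allocator: each 0x1000-byte parent PT is carved into nvkm_memory_size() >> shift slots, ptp->free holds one bit per open slot, nvkm_mmu_ptp_get() claims the lowest set bit via __ffs(), and nvkm_mmu_ptp_put() sets it back, releasing the parent once free == mask. Below is a minimal userspace sketch of the same arithmetic, assuming a 0x100-byte sub-PT inside a 4KiB parent; __builtin_ctz() (a gcc/clang builtin) stands in for the kernel's __ffs(), and every name here is illustrative rather than part of the driver.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned shift = 8;                /* order_base_2(0x100) */
	const unsigned slots = 0x1000 >> shift;  /* 16 sub-PTs per parent */
	const uint16_t mask = (1u << slots) - 1; /* 0xffff */
	uint16_t free_map = mask;                /* every slot open */

	/* Allocate: lowest free slot, as __ffs(ptp->free) does above. */
	int slot = __builtin_ctz(free_map);
	free_map &= ~(uint16_t)(1u << slot);
	printf("slot %d -> base 0x%x\n", slot, (unsigned)(slot << shift));

	/* Free: set the bit back; a full mask means the parent can go. */
	free_map |= (uint16_t)(1u << slot);
	assert(free_map == mask);
	return 0;
}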
+
+struct nvkm_mmu_ptc {
+       struct list_head head;
+       struct list_head item;
+       u32 size;
+       u32 refs;
+};
+
+static inline struct nvkm_mmu_ptc *
+nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
+{
+       struct nvkm_mmu_ptc *ptc;
+
+       list_for_each_entry(ptc, &mmu->ptc.list, head) {
+               if (ptc->size == size)
+                       return ptc;
+       }
+
+       ptc = kmalloc(sizeof(*ptc), GFP_KERNEL);
+       if (ptc) {
+               INIT_LIST_HEAD(&ptc->item);
+               ptc->size = size;
+               ptc->refs = 0;
+               list_add(&ptc->head, &mmu->ptc.list);
+       }
+
+       return ptc;
+}
+
+void
+nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt)
+{
+       struct nvkm_mmu_pt *pt = *ppt;
+       if (pt) {
+               /* Handle sub-allocated page tables. */
+               if (pt->sub) {
+                       mutex_lock(&mmu->ptp.mutex);
+                       nvkm_mmu_ptp_put(mmu, force, pt);
+                       mutex_unlock(&mmu->ptp.mutex);
+                       return;
+               }
+
+               /* Either cache or free the object. */
+               mutex_lock(&mmu->ptc.mutex);
+               if (pt->ptc->refs < 8 /* Heuristic. */ && !force) {
+                       list_add_tail(&pt->head, &pt->ptc->item);
+                       pt->ptc->refs++;
+               } else {
+                       nvkm_memory_unref(&pt->memory);
+                       kfree(pt);
+               }
+               mutex_unlock(&mmu->ptc.mutex);
+       }
+}
+
+struct nvkm_mmu_pt *
+nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
+{
+       struct nvkm_mmu_ptc *ptc;
+       struct nvkm_mmu_pt *pt;
+       int ret;
+
+       /* Sub-allocated page table (i.e. GP100 LPT). */
+       if (align < 0x1000) {
+               mutex_lock(&mmu->ptp.mutex);
+               pt = nvkm_mmu_ptp_get(mmu, align, zero);
+               mutex_unlock(&mmu->ptp.mutex);
+               return pt;
+       }
+
+       /* Lookup cache for this page table size. */
+       mutex_lock(&mmu->ptc.mutex);
+       ptc = nvkm_mmu_ptc_find(mmu, size);
+       if (!ptc) {
+               mutex_unlock(&mmu->ptc.mutex);
+               return NULL;
+       }
+
+       /* If there's a free PT in the cache, reuse it. */
+       pt = list_first_entry_or_null(&ptc->item, typeof(*pt), head);
+       if (pt) {
+               if (zero)
+                       nvkm_fo64(pt->memory, 0, 0, size >> 3);
+               list_del(&pt->head);
+               ptc->refs--;
+               mutex_unlock(&mmu->ptc.mutex);
+               return pt;
+       }
+       mutex_unlock(&mmu->ptc.mutex);
+
+       /* No such luck, we need to allocate. */
+       if (!(pt = kmalloc(sizeof(*pt), GFP_KERNEL)))
+               return NULL;
+       pt->ptc = ptc;
+       pt->sub = false;
+
+       ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+                             size, align, zero, &pt->memory);
+       if (ret) {
+               kfree(pt);
+               return NULL;
+       }
+
+       pt->base = 0;
+       pt->addr = nvkm_memory_addr(pt->memory);
+       return pt;
+}
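
Callers see both paths through this single pair of entry points: nvkm_mmu_ptc_get() recycles a cached page table or allocates a fresh nvkm_memory object, while any request with align < 0x1000 (the GP100 LPT case noted above) is routed to the sub-allocator. A hedged sketch of how a backend might pair get and put; the function and the sizes are illustrative, not part of this patch.

static int
example_pt_cycle(struct nvkm_mmu *mmu)
{
	/* 4KiB-aligned request: backed by its own allocation, zeroed. */
	struct nvkm_mmu_pt *full = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, true);
	/* align < 0x1000: carved out of a shared 4KiB parent instead. */
	struct nvkm_mmu_pt *sub = nvkm_mmu_ptc_get(mmu, 0x100, 0x100, false);

	if (!full || !sub) {
		/* ptc_put() tolerates *ppt == NULL, so this is safe. */
		nvkm_mmu_ptc_put(mmu, true, &sub);
		nvkm_mmu_ptc_put(mmu, true, &full);
		return -ENOMEM;
	}

	/* ... PTEs would be written via pt->memory at pt->base ... */

	/* Non-forced puts may cache the PT (up to 8 per size class). */
	nvkm_mmu_ptc_put(mmu, false, &sub);
	nvkm_mmu_ptc_put(mmu, false, &full);
	return 0;
}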
+
+void
+nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
+{
+       struct nvkm_mmu_ptc *ptc;
+       list_for_each_entry(ptc, &mmu->ptc.list, head) {
+               struct nvkm_mmu_pt *pt, *tt;
+               list_for_each_entry_safe(pt, tt, &ptc->item, head) {
+                       nvkm_memory_unref(&pt->memory);
+                       list_del(&pt->head);
+                       kfree(pt);
+               }
+       }
+}
+
+static void
+nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu)
+{
+       struct nvkm_mmu_ptc *ptc, *ptct;
+
+       list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) {
+               WARN_ON(!list_empty(&ptc->item));
+               list_del(&ptc->head);
+               kfree(ptc);
+       }
+}
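
Both teardown helpers delete nodes from the lists they are walking, which is why they use the _safe iterators: list_for_each_entry_safe() reads a lookahead cursor (tt, ptct) before the loop body runs, so list_del() plus kfree() on the current node cannot poison the traversal. The pattern in isolation, as an illustrative kernel-style sketch (assumes <linux/list.h> and <linux/slab.h>; the names are hypothetical):

struct item {
	struct list_head head;
};

static void
drain(struct list_head *list)
{
	struct item *it, *tmp;

	/* "tmp" is fetched before "it" is freed, keeping iteration valid. */
	list_for_each_entry_safe(it, tmp, list, head) {
		list_del(&it->head);
		kfree(it);
	}
}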
+
+static void
+nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
+{
+       mutex_init(&mmu->ptc.mutex);
+       INIT_LIST_HEAD(&mmu->ptc.list);
+       mutex_init(&mmu->ptp.mutex);
+       INIT_LIST_HEAD(&mmu->ptp.list);
+}
+
 void
 nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
 {
@@ -66,7 +287,7 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
                        delta += (u64)len << vma->node->type;
                }
                r = r->next;
-       };
+       }
 
        mmu->func->flush(vm);
 }
@@ -243,7 +464,7 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
 
                mmu->func->flush(vm);
 
-               nvkm_memory_del(&pgt);
+               nvkm_memory_unref(&pgt);
        }
 }
 
@@ -357,6 +578,7 @@ nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
                vm->pgt[0].refcount[0] = 1;
                vm->pgt[0].mem[0] = pgt;
                nvkm_memory_boot(pgt, vm);
+               vm->bootstrapped = true;
        }
 
        return ret;
@@ -388,7 +610,7 @@ nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
                return -ENOMEM;
        }
 
-       ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
+       ret = nvkm_mm_init(&vm->mm, 0, mm_offset >> 12, mm_length >> 12,
                           block >> 12);
        if (ret) {
                vfree(vm->pgt);
@@ -481,6 +703,8 @@ nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
        }
 
        if (*ptr) {
+               if ((*ptr)->bootstrapped && pgd)
+                       nvkm_memory_unref(&(*ptr)->pgt[0].mem[0]);
                nvkm_vm_unlink(*ptr, pgd);
                kref_put(&(*ptr)->refcount, nvkm_vm_del);
        }
@@ -511,9 +735,13 @@ static void *
 nvkm_mmu_dtor(struct nvkm_subdev *subdev)
 {
        struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+       void *data = mmu;
+
        if (mmu->func->dtor)
-               return mmu->func->dtor(mmu);
-       return mmu;
+               data = mmu->func->dtor(mmu);
+
+       nvkm_mmu_ptc_fini(mmu);
+       return data;
 }
 
 static const struct nvkm_subdev_func
@@ -532,6 +760,7 @@ nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
        mmu->limit = func->limit;
        mmu->dma_bits = func->dma_bits;
        mmu->lpg_shift = func->lpg_shift;
+       nvkm_mmu_ptc_init(mmu);
 }
 
 int