*
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "ummu.h"
#include "vmm.h"
-#include <core/gpuobj.h>
+#include <subdev/bar.h>
#include <subdev/fb.h>
+#include <nvif/if500d.h>
+#include <nvif/if900d.h>
+
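+/* A page-table page: a whole page of instance memory from which
+ * smaller (sub-page-sized) page tables are sub-allocated.
+ */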
struct nvkm_mmu_ptp {
	struct nvkm_mmu_pt *pt;
	struct list_head head;
	u8  shift;
	u16 mask;
	u16 free;
};

static void
nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
{
	mutex_init(&mmu->ptc.mutex);
	INIT_LIST_HEAD(&mmu->ptc.list);
	mutex_init(&mmu->ptp.mutex);
	INIT_LIST_HEAD(&mmu->ptp.list);
}
-void
-nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
-{
- struct nvkm_vm *vm = vma->vm;
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_mm_node *r = node->mem;
- int big = vma->node->type != mmu->func->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (mmu->func->pgt_bits - bits);
- u32 end, len;
-
- delta = 0;
- while (r) {
- u64 phys = (u64)r->offset << 12;
- u32 num = r->length >> bits;
-
- while (num) {
- struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-
- end = (pte + num);
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- mmu->func->map(vma, pgt, node, pte, len, phys, delta);
-
- num -= len;
- pte += len;
- if (unlikely(end >= max)) {
- phys += len << (bits + 12);
- pde++;
- pte = 0;
- }
-
- delta += (u64)len << vma->node->type;
- }
- r = r->next;
- }
-
- mmu->func->flush(vm);
-}
-
-static void
-nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
- struct nvkm_mem *mem)
-{
- struct nvkm_vm *vm = vma->vm;
- struct nvkm_mmu *mmu = vm->mmu;
- int big = vma->node->type != mmu->func->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 num = length >> vma->node->type;
- u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (mmu->func->pgt_bits - bits);
- unsigned m, sglen;
- u32 end, len;
- int i;
- struct scatterlist *sg;
-
- for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
- struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
- sglen = sg_dma_len(sg) >> PAGE_SHIFT;
-
- end = pte + sglen;
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- for (m = 0; m < len; m++) {
- dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-
- mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
- num--;
- pte++;
-
- if (num == 0)
- goto finish;
- }
- if (unlikely(end >= max)) {
- pde++;
- pte = 0;
- }
- if (m < sglen) {
- for (; m < sglen; m++) {
- dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-
- mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
- num--;
- pte++;
- if (num == 0)
- goto finish;
- }
- }
-
- }
-finish:
- mmu->func->flush(vm);
-}
-
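+/* Register a memory type, combining the given attributes with those
+ * of the heap it targets; heap is an index returned by nvkm_mmu_heap().
+ */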
static void
-nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
- struct nvkm_mem *mem)
-{
- struct nvkm_vm *vm = vma->vm;
- struct nvkm_mmu *mmu = vm->mmu;
- dma_addr_t *list = mem->pages;
- int big = vma->node->type != mmu->func->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 num = length >> vma->node->type;
- u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (mmu->func->pgt_bits - bits);
- u32 end, len;
-
- while (num) {
- struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-
- end = (pte + num);
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- mmu->func->map_sg(vma, pgt, mem, pte, len, list);
-
- num -= len;
- pte += len;
- list += len;
- if (unlikely(end >= max)) {
- pde++;
- pte = 0;
- }
- }
-
- mmu->func->flush(vm);
-}
-
-void
-nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
+nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type)
{
- if (node->sg)
- nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
- else
- if (node->pages)
- nvkm_vm_map_sg(vma, 0, node->size << 12, node);
- else
- nvkm_vm_map_at(vma, 0, node);
-}
-
-void
-nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
-{
- struct nvkm_vm *vm = vma->vm;
- struct nvkm_mmu *mmu = vm->mmu;
- int big = vma->node->type != mmu->func->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 num = length >> vma->node->type;
- u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (mmu->func->pgt_bits - bits);
- u32 end, len;
-
- while (num) {
- struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-
- end = (pte + num);
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- mmu->func->unmap(vma, pgt, pte, len);
-
- num -= len;
- pte += len;
- if (unlikely(end >= max)) {
- pde++;
- pte = 0;
- }
- }
-
- mmu->func->flush(vm);
-}
-
-void
-nvkm_vm_unmap(struct nvkm_vma *vma)
-{
- nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
-}
-
-static void
-nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
-{
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_vm_pgt *vpgt;
- struct nvkm_memory *pgt;
- u32 pde;
-
- for (pde = fpde; pde <= lpde; pde++) {
- vpgt = &vm->pgt[pde - vm->fpde];
- if (--vpgt->refcount[big])
- continue;
-
- pgt = vpgt->mem[big];
- vpgt->mem[big] = NULL;
-
- if (mmu->func->map_pgt)
- mmu->func->map_pgt(vm, pde, vpgt->mem);
-
- mmu->func->flush(vm);
-
- nvkm_memory_unref(&pgt);
+ if (heap >= 0 && !WARN_ON(mmu->type_nr == ARRAY_SIZE(mmu->type))) {
+ mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type;
+ mmu->type[mmu->type_nr].heap = heap;
+ mmu->type_nr++;
}
}
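+/* Register a heap of the given size, returning an index usable with
+ * nvkm_mmu_type(), or -EINVAL if the heap is empty or the table full.
+ */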
static int
-nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
+nvkm_mmu_heap(struct nvkm_mmu *mmu, u8 type, u64 size)
{
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
- int big = (type != mmu->func->spg_shift);
- u32 pgt_size;
- int ret;
-
- pgt_size = (1 << (mmu->func->pgt_bits + 12)) >> type;
- pgt_size *= 8;
-
- ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
- pgt_size, 0x1000, true, &vpgt->mem[big]);
- if (unlikely(ret))
- return ret;
-
- if (mmu->func->map_pgt)
- mmu->func->map_pgt(vm, pde, vpgt->mem);
-
- vpgt->refcount[big]++;
- return 0;
-}
-
-int
-nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
- struct nvkm_vma *vma)
-{
- struct nvkm_mmu *mmu = vm->mmu;
- u32 align = (1 << page_shift) >> 12;
- u32 msize = size >> 12;
- u32 fpde, lpde, pde;
- int ret;
-
- mutex_lock(&vm->mutex);
- ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
- &vma->node);
- if (unlikely(ret != 0)) {
- mutex_unlock(&vm->mutex);
- return ret;
- }
-
- fpde = (vma->node->offset >> mmu->func->pgt_bits);
- lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
-
- for (pde = fpde; pde <= lpde; pde++) {
- struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
- int big = (vma->node->type != mmu->func->spg_shift);
-
- if (likely(vpgt->refcount[big])) {
- vpgt->refcount[big]++;
- continue;
+ if (size) {
+ if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) {
+ mmu->heap[mmu->heap_nr].type = type;
+ mmu->heap[mmu->heap_nr].size = size;
+ return mmu->heap_nr++;
}
-
- ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
- if (ret) {
- if (pde != fpde)
- nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
- nvkm_mm_free(&vm->mm, &vma->node);
- mutex_unlock(&vm->mutex);
- return ret;
- }
- }
- mutex_unlock(&vm->mutex);
-
- vma->vm = NULL;
- nvkm_vm_ref(vm, &vma->vm, NULL);
- vma->offset = (u64)vma->node->offset << 12;
- vma->access = access;
- return 0;
-}
-
-void
-nvkm_vm_put(struct nvkm_vma *vma)
-{
- struct nvkm_mmu *mmu;
- struct nvkm_vm *vm;
- u32 fpde, lpde;
-
- if (unlikely(vma->node == NULL))
- return;
- vm = vma->vm;
- mmu = vm->mmu;
-
- fpde = (vma->node->offset >> mmu->func->pgt_bits);
- lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
-
- mutex_lock(&vm->mutex);
- nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
- nvkm_mm_free(&vm->mm, &vma->node);
- mutex_unlock(&vm->mutex);
-
- nvkm_vm_ref(NULL, &vma->vm, NULL);
-}
-
-int
-nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
-{
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_memory *pgt;
- int ret;
-
- ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
- (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
- if (ret == 0) {
- vm->pgt[0].refcount[0] = 1;
- vm->pgt[0].mem[0] = pgt;
- nvkm_memory_boot(pgt, vm);
- vm->bootstrapped = true;
- }
-
- return ret;
-}
-
-static int
-nvkm_vm_legacy(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
- u32 block, struct nvkm_vm *vm)
-{
- u64 mm_length = (offset + length) - mm_offset;
- int ret;
-
- kref_init(&vm->refcount);
- vm->fpde = offset >> (mmu->func->pgt_bits + 12);
- vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);
-
- vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
- if (!vm->pgt) {
- kfree(vm);
- return -ENOMEM;
}
-
- if (block > length)
- block = length;
-
- ret = nvkm_mm_init(&vm->mm, 0, mm_offset >> 12, mm_length >> 12,
- block >> 12);
- if (ret) {
- vfree(vm->pgt);
- return ret;
- }
-
- return 0;
+ return -EINVAL;
}
-int
-nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
- struct lock_class_key *key, struct nvkm_vm **pvm)
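+/* Register the host (system) memory heap, and its memory types in
+ * order of preference.
+ */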
+static void
+nvkm_mmu_host(struct nvkm_mmu *mmu)
{
- struct nvkm_mmu *mmu = device->mmu;
-
- *pvm = NULL;
- if (mmu->func->vmm.ctor) {
- int ret = mmu->func->vmm.ctor(mmu, mm_offset,
- offset + length - mm_offset,
- NULL, 0, key, "legacy", pvm);
- if (ret) {
- nvkm_vm_ref(NULL, pvm, NULL);
- return ret;
- }
-
- ret = nvkm_vm_legacy(mmu, offset, length, mm_offset,
- (*pvm)->func->page_block ?
- (*pvm)->func->page_block : 4096, *pvm);
- if (ret)
- nvkm_vm_ref(NULL, pvm, NULL);
+ struct nvkm_device *device = mmu->subdev.device;
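+	/* Kinds (block-linear) only if the backend supports them on sysmem. */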
+ u8 type = NVKM_MEM_KIND * !!mmu->func->kind_sys;
+ int heap;
+
+ /* Non-mappable system memory. */
+ heap = nvkm_mmu_heap(mmu, NVKM_MEM_HOST, ~0ULL);
+ nvkm_mmu_type(mmu, heap, type);
+
+ /* Non-coherent, cached, system memory.
+ *
+ * Block-linear mappings of system memory must be done through
+ * BAR1, and cannot be supported on systems where we're unable
+ * to map BAR1 with write-combining.
+ */
+ type |= NVKM_MEM_MAPPABLE;
+ if (!device->bar || device->bar->iomap_uncached)
+ nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
+ else
+ nvkm_mmu_type(mmu, heap, type);
- return ret;
- }
+ /* Coherent, cached, system memory.
+ *
+	 * Unsupported on systems that can't create snooped mappings,
+	 * and also for block-linear mappings, which must be done
+	 * through BAR1.
+ */
+ type |= NVKM_MEM_COHERENT;
+ if (device->func->cpu_coherent)
+ nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
- return -EINVAL;
+ /* Uncached system memory. */
+ nvkm_mmu_type(mmu, heap, type |= NVKM_MEM_UNCACHED);
}
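+/* Register the VRAM heaps (mixed, normal, non-mappable), and their
+ * memory types in order of preference.
+ */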
static void
-nvkm_vm_del(struct kref *kref)
+nvkm_mmu_vram(struct nvkm_mmu *mmu)
{
- struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
+ struct nvkm_device *device = mmu->subdev.device;
+ struct nvkm_mm *mm = &device->fb->ram->vram;
+ const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+ const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+ const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
+ u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
+ u8 heap = NVKM_MEM_VRAM;
+ int heapM, heapN, heapU;
+
+ /* Mixed-memory doesn't support compression or display. */
+ heapM = nvkm_mmu_heap(mmu, heap, sizeM << NVKM_RAM_MM_SHIFT);
+
+ heap |= NVKM_MEM_COMP;
+ heap |= NVKM_MEM_DISP;
+ heapN = nvkm_mmu_heap(mmu, heap, sizeN << NVKM_RAM_MM_SHIFT);
+ heapU = nvkm_mmu_heap(mmu, heap, sizeU << NVKM_RAM_MM_SHIFT);
+
+ /* Add non-mappable VRAM types first so that they're preferred
+	 * over anything else.  Mixed-memory will be slower than other
+	 * heaps, so it's prioritised last.
+ */
+ nvkm_mmu_type(mmu, heapU, type);
+ nvkm_mmu_type(mmu, heapN, type);
+ nvkm_mmu_type(mmu, heapM, type);
- nvkm_mm_fini(&vm->mm);
- vfree(vm->pgt);
- if (vm->func)
- nvkm_vmm_dtor(vm);
- kfree(vm);
-}
+ /* Add host memory types next, under the assumption that users
+ * wanting mappable memory want to use them as staging buffers
+ * or the like.
+ */
+ nvkm_mmu_host(mmu);
-int
-nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_memory *inst)
-{
- if (ref) {
- if (ref->func->join && inst) {
- int ret = ref->func->join(ref, inst), i;
- if (ret)
- return ret;
-
- if (ref->mmu->func->map_pgt) {
- for (i = ref->fpde; i <= ref->lpde; i++)
- ref->mmu->func->map_pgt(ref, i, ref->pgt[i - ref->fpde].mem);
- }
+ /* Mappable VRAM types go last, as they're basically the worst
+ * possible type to ask for unless there's no other choice.
+ */
+ if (device->bar) {
+ /* Write-combined BAR1 access. */
+ type |= NVKM_MEM_MAPPABLE;
+ if (!device->bar->iomap_uncached) {
+ nvkm_mmu_type(mmu, heapN, type);
+ nvkm_mmu_type(mmu, heapM, type);
}
- kref_get(&ref->refcount);
+ /* Uncached BAR1 access. */
+ type |= NVKM_MEM_COHERENT;
+ type |= NVKM_MEM_UNCACHED;
+ nvkm_mmu_type(mmu, heapN, type);
+ nvkm_mmu_type(mmu, heapM, type);
}
-
- if (*ptr) {
- if ((*ptr)->func->part && inst)
- (*ptr)->func->part(*ptr, inst);
- if ((*ptr)->bootstrapped && inst)
- nvkm_memory_unref(&(*ptr)->pgt[0].mem[0]);
- kref_put(&(*ptr)->refcount, nvkm_vm_del);
- }
-
- *ptr = ref;
- return 0;
}
static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+ /* Determine available memory types. */
+ if (mmu->subdev.device->fb && mmu->subdev.device->fb->ram)
+ nvkm_mmu_vram(mmu);
+ else
+ nvkm_mmu_host(mmu);
+
if (mmu->func->vmm.global) {
- int ret = nvkm_vm_new(subdev->device, 0, mmu->limit, 0,
- NULL, &mmu->vmm);
+ int ret = nvkm_vmm_new(subdev->device, 0, 0, NULL, 0, NULL,
+ "gart", &mmu->vmm);
if (ret)
return ret;
}
- if (mmu->func->oneinit)
- return mmu->func->oneinit(mmu);
-
return 0;
}
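+/* Illustrative only, not part of this patch: consumers of the tables
+ * built above scan them in registration order, which is why preferred
+ * types are added first.  Roughly:
+ *
+ *	for (i = 0; i < mmu->type_nr; i++) {
+ *		if ((mmu->type[i].type & wanted) == wanted)
+ *			break;
+ *	}
+ */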
static void *
nvkm_mmu_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_mmu *mmu = nvkm_mmu(subdev);
- nvkm_vm_ref(NULL, &mmu->vmm, NULL);
+ nvkm_vmm_unref(&mmu->vmm);
nvkm_mmu_ptc_fini(mmu);
return mmu;
}

void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu *mmu)
{
nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
mmu->func = func;
- mmu->limit = func->limit;
mmu->dma_bits = func->dma_bits;
- mmu->lpg_shift = func->lpg_shift;
nvkm_mmu_ptc_init(mmu);
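+	/* Expose the MMU to userspace via the UMMU object class. */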
+ mmu->user.ctor = nvkm_ummu_new;
+ mmu->user.base = func->mmu.user;
}
int