git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
drm/nouveau/fb/ram: add interface to allocate vram as an nvkm_memory object
author: Ben Skeggs <bskeggs@redhat.com>
Tue, 31 Oct 2017 17:56:19 +0000 (03:56 +1000)
committer: Ben Skeggs <bskeggs@redhat.com>
Thu, 2 Nov 2017 03:32:23 +0000 (13:32 +1000)
Upcoming MMU changes use nvkm_memory as their basic representation of memory,
so we need to be able to allocate VRAM like this.

The code is basically identical to the current chipset-specific allocators,
minus support for compression tags (which will be handled elsewhere anyway).

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c

index b838d9385859798b749c8e508c238780dfbe7608..942d67174005033aaf48c65b0446a390a4ca0dcb 100644 (file)
@@ -145,6 +145,10 @@ struct nvkm_ram {
        struct nvkm_ram_data target;
 };
 
+int
+nvkm_ram_get(struct nvkm_device *, u8 heap, u8 type, u8 page, u64 size,
+            bool contig, bool back, struct nvkm_memory **);
+
 struct nvkm_ram_func {
        u64 upper;
        u32 (*probe_fbp)(const struct nvkm_ram_func *, struct nvkm_device *,
index 69f65daaf1eaaef933f2146f761477b628ec098d..8d17644c182346710fbdb72530be8178386a2e0a 100644 (file)
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
+#define nvkm_vram(p) container_of((p), struct nvkm_vram, memory)
 #include "ram.h"
 
+#include <core/memory.h>
+#include <subdev/mmu.h>
+
+/* nvkm_memory backend wrapping a VRAM allocation made from the
+ * nvkm_ram mm allocator. */
+struct nvkm_vram {
+       struct nvkm_memory memory;      /* base object; recovered via container_of() */
+       struct nvkm_ram *ram;           /* owning RAM device, needed to free nodes in dtor */
+       u8 page;                        /* page shift of the allocation (used as 1 << page) */
+       struct nvkm_mm_node *mn;        /* head of the linked list of allocated mm nodes */
+};
+
+/* nvkm_memory_func.map implementation: maps this VRAM into @vma at
+ * @offset through the legacy nvkm_vm_map_at() path.  The vmm/argv/argc
+ * parameters are unused here (presumably consumed by other backends of
+ * the same interface — confirm against nvkm_memory_func users). */
+static int
+nvkm_vram_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+             struct nvkm_vma *vma, void *argv, u32 argc)
+{
+       struct nvkm_vram *vram = nvkm_vram(memory);
+       /* Wrap the mm node list in a temporary nvkm_mem so the old map
+        * API can consume it. */
+       struct nvkm_mem mem = {
+               .mem = vram->mn,
+       };
+       nvkm_vm_map_at(vma, offset, &mem);
+       return 0;
+}
+
+/* Size of the allocation in bytes; mm sizes are stored in
+ * NVKM_RAM_MM_SHIFT-sized units, hence the shift. */
+static u64
+nvkm_vram_size(struct nvkm_memory *memory)
+{
+       return (u64)nvkm_mm_size(nvkm_vram(memory)->mn) << NVKM_RAM_MM_SHIFT;
+}
+
+/* Physical base address of the allocation in bytes, or ~0ULL when the
+ * allocation spans multiple non-contiguous mm nodes and therefore has
+ * no single base address. */
+static u64
+nvkm_vram_addr(struct nvkm_memory *memory)
+{
+       struct nvkm_vram *vram = nvkm_vram(memory);
+       if (!nvkm_mm_contiguous(vram->mn))
+               return ~0ULL;
+       return (u64)nvkm_mm_addr(vram->mn) << NVKM_RAM_MM_SHIFT;
+}
+
+/* Page shift the allocation was made with (see nvkm_ram_get()). */
+static u8
+nvkm_vram_page(struct nvkm_memory *memory)
+{
+       return nvkm_vram(memory)->page;
+}
+
+/* This backend always represents on-board VRAM. */
+static enum nvkm_memory_target
+nvkm_vram_target(struct nvkm_memory *memory)
+{
+       return NVKM_MEM_TARGET_VRAM;
+}
+
+/* Destructor: returns every mm node on the list back to the RAM
+ * allocator, serialised by the fb subdev mutex (the same lock taken by
+ * nvkm_ram_get()).  Returns the containing nvkm_vram so the caller can
+ * release it — presumably kfree()d by the nvkm_memory core; confirm. */
+static void *
+nvkm_vram_dtor(struct nvkm_memory *memory)
+{
+       struct nvkm_vram *vram = nvkm_vram(memory);
+       struct nvkm_mm_node *next = vram->mn;
+       struct nvkm_mm_node *node;
+       mutex_lock(&vram->ram->fb->subdev.mutex);
+       while ((node = next)) {
+               /* Cache ->next before nvkm_mm_free() releases the node. */
+               next = node->next;
+               nvkm_mm_free(&vram->ram->vram, &node);
+       }
+       mutex_unlock(&vram->ram->fb->subdev.mutex);
+       return vram;
+}
+
+/* nvkm_memory_func vtable for VRAM allocations made by nvkm_ram_get(). */
+static const struct nvkm_memory_func
+nvkm_vram = {
+       .dtor = nvkm_vram_dtor,
+       .target = nvkm_vram_target,
+       .page = nvkm_vram_page,
+       .addr = nvkm_vram_addr,
+       .size = nvkm_vram_size,
+       .map = nvkm_vram_map,
+};
+
+/**
+ * nvkm_ram_get - allocate VRAM and wrap it in an nvkm_memory object
+ * @device:  device whose FB/RAM to allocate from
+ * @heap:    mm heap identifier to allocate within
+ * @type:    mm memory type
+ * @rpage:   requested page shift, clamped up to NVKM_RAM_MM_SHIFT
+ * @size:    requested size in bytes, rounded up to the page size
+ * @contig:  if true, require a single physically-contiguous node
+ * @back:    if true, allocate from the top of the heap downwards
+ * @pmemory: on success, the new memory object
+ *
+ * Returns 0 on success, -ENODEV if the device has no RAM, -ENOMEM on
+ * allocation failure, or the mm allocator's error code.
+ */
+int
+nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
+            bool contig, bool back, struct nvkm_memory **pmemory)
+{
+       struct nvkm_ram *ram;
+       struct nvkm_mm *mm;
+       struct nvkm_mm_node **node, *r;
+       struct nvkm_vram *vram;
+       u8   page = max(rpage, (u8)NVKM_RAM_MM_SHIFT);
+       u32 align = (1 << page) >> NVKM_RAM_MM_SHIFT;
+       u32   max = ALIGN(size, 1 << page) >> NVKM_RAM_MM_SHIFT;
+       u32   min = contig ? max : align;
+       int ret;
+
+       /* The condition both validates and assigns ram; a separate
+        * "ram = device->fb->ram" afterwards would be redundant. */
+       if (!device->fb || !(ram = device->fb->ram))
+               return -ENODEV;
+       mm = &ram->vram;
+
+       if (!(vram = kzalloc(sizeof(*vram), GFP_KERNEL)))
+               return -ENOMEM;
+       nvkm_memory_ctor(&nvkm_vram, &vram->memory);
+       vram->ram = ram;
+       vram->page = page;
+       *pmemory = &vram->memory;
+
+       /* Keep allocating nodes until the request is satisfied; for a
+        * contiguous request min == max forces a single node. */
+       mutex_lock(&ram->fb->subdev.mutex);
+       node = &vram->mn;
+       do {
+               if (back)
+                       ret = nvkm_mm_tail(mm, heap, type, max, min, align, &r);
+               else
+                       ret = nvkm_mm_head(mm, heap, type, max, min, align, &r);
+               if (ret) {
+                       mutex_unlock(&ram->fb->subdev.mutex);
+                       /* Unref frees any nodes already chained via the dtor. */
+                       nvkm_memory_unref(pmemory);
+                       return ret;
+               }
+
+               *node = r;
+               node = &r->next;
+               max -= r->length;
+       } while (max);
+       mutex_unlock(&ram->fb->subdev.mutex);
+       return 0;
+}
+
 int
 nvkm_ram_init(struct nvkm_ram *ram)
 {