]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/gpu/drm/nouveau/nouveau_mem.c
drm/nouveau: directly handle comptag allocation
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / nouveau / nouveau_mem.c
1 /*
2 * Copyright 2017 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22 #include "nouveau_mem.h"
23 #include "nouveau_drv.h"
24 #include "nouveau_bo.h"
25
26 #include <subdev/ltc.h>
27
28 #include <drm/ttm/ttm_bo_driver.h>
29
int
nouveau_mem_map(struct nouveau_mem *mem,
		struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
	/* Thin wrapper: map the backing storage described by mem->_mem into
	 * the given virtual-memory area.  The vmm argument is unused here;
	 * the mapping target is carried entirely by vma.
	 */
	nvkm_vm_map(vma, mem->_mem);
	return 0;
}
37
38 void
39 nouveau_mem_fini(struct nouveau_mem *mem)
40 {
41 if (mem->vma[1].node) {
42 nvkm_vm_unmap(&mem->vma[1]);
43 nvkm_vm_put(&mem->vma[1]);
44 }
45 if (mem->vma[0].node) {
46 nvkm_vm_unmap(&mem->vma[0]);
47 nvkm_vm_put(&mem->vma[0]);
48 }
49 nvkm_memory_tags_put(&mem->memory, nvxx_device(&mem->cli->device),
50 &mem->tags);
51 }
52
53 int
54 nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
55 {
56 struct nouveau_mem *mem = nouveau_mem(reg);
57 struct nouveau_cli *cli = mem->cli;
58
59 if (mem->kind && cli->device.info.chipset == 0x50)
60 mem->comp = mem->kind = 0;
61 if (mem->comp) {
62 if (cli->device.info.chipset >= 0xc0)
63 mem->kind = gf100_pte_storage_type_map[mem->kind];
64 mem->comp = 0;
65 }
66
67 mem->__mem.size = (reg->num_pages << PAGE_SHIFT) >> 12;
68 mem->__mem.memtype = (mem->comp << 7) | mem->kind;
69 if (tt->ttm.sg) mem->__mem.sg = tt->ttm.sg;
70 else mem->__mem.pages = tt->dma_address;
71 mem->_mem = &mem->__mem;
72 mem->mem.page = 12;
73 mem->_mem->memory = &mem->memory;
74 return 0;
75 }
76
/* Allocate VRAM for a nouveau_mem via the nvkm_ram backend.
 *
 * @reg:    TTM memory region to back; reg->start is filled on success.
 * @contig: request physically-contiguous VRAM.
 * @page:   page-size shift for the allocation (e.g. 16 or 17 for the
 *          large-page comptag cases below).
 *
 * Returns 0 on success or a negative error from the ram allocator.
 */
int
nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
{
	struct nouveau_mem *mem = nouveau_mem(reg);
	struct nouveau_cli *cli = mem->cli;
	struct nvkm_device *device = nvxx_device(&cli->device);
	struct nvkm_ram *ram = nvxx_fb(&cli->device)->ram;
	/* Round the region size up to a whole number of (1 << page) pages. */
	u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
	int ret;

	mem->mem.page = page;
	/* NOTE(review): dereferences mem->_mem here — assumes _mem already
	 * points at valid storage before this call (e.g. set up as in
	 * nouveau_mem_host); confirm against callers.
	 */
	mem->_mem->memory = &mem->memory;

	if (cli->device.info.chipset < 0xc0 && mem->comp) {
		/* Pre-Fermi compression: tags are only requested for the
		 * 64KiB (page == 16) large-page case; no clear callback.
		 */
		if (page == 16) {
			ret = nvkm_memory_tags_get(mem->_mem->memory, device,
						   size >> page, NULL,
						   &mem->tags);
			WARN_ON(ret);
		}
		/* No tags available -> silently fall back to uncompressed. */
		if (!mem->tags || !mem->tags->mn)
			mem->comp = 0;
	} else
	if (cli->device.info.chipset >= 0xc0 &&
	    gf100_pte_storage_type_map[mem->kind] != mem->kind) {
		/* Fermi+: the kind itself encodes compression.  Tags are
		 * requested for the 128KiB (page == 17) case and cleared
		 * through the LTC on allocation.
		 */
		if (page == 17) {
			ret = nvkm_memory_tags_get(mem->_mem->memory, device,
						   size >> page,
						   nvkm_ltc_tags_clear,
						   &mem->tags);
			WARN_ON(ret);
		}
		/* No tags -> demote kind to its uncompressed storage type. */
		if (!mem->tags || !mem->tags->mn)
			mem->kind = gf100_pte_storage_type_map[mem->kind];
	}

	/* Allocate the actual VRAM; this may replace mem->_mem with the
	 * allocator's own descriptor.  align and size_nc are in bytes;
	 * size_nc == 0 requests a contiguous allocation.
	 */
	ret = ram->func->get(ram, size, 1 << page, contig ? 0 : 1 << page,
			     (mem->comp << 8) | mem->kind, &mem->_mem);
	if (ret) {
		/* Undo the comptag reservation taken above, if any. */
		nvkm_memory_tags_put(mem->_mem->memory, device, &mem->tags);
		return ret;
	}

	if (mem->tags && mem->tags->mn)
		mem->_mem->tag = mem->tags->mn;

	reg->start = mem->_mem->offset >> PAGE_SHIFT;
	return ret;
}
126
127 void
128 nouveau_mem_del(struct ttm_mem_reg *reg)
129 {
130 struct nouveau_mem *mem = nouveau_mem(reg);
131 nouveau_mem_fini(mem);
132 kfree(reg->mm_node);
133 reg->mm_node = NULL;
134 }
135
136 static enum nvkm_memory_target
137 nouveau_mem_memory_target(struct nvkm_memory *memory)
138 {
139 struct nouveau_mem *mem = container_of(memory, typeof(*mem), memory);
140 if (mem->_mem->mem)
141 return NVKM_MEM_TARGET_VRAM;
142 return NVKM_MEM_TARGET_HOST;
143 };
144
145 static u8
146 nouveau_mem_memory_page(struct nvkm_memory *memory)
147 {
148 struct nouveau_mem *mem = container_of(memory, typeof(*mem), memory);
149 return mem->mem.page;
150 };
151
152 static u64
153 nouveau_mem_memory_size(struct nvkm_memory *memory)
154 {
155 struct nouveau_mem *mem = container_of(memory, typeof(*mem), memory);
156 return mem->_mem->size << 12;
157 }
158
/* Minimal nvkm_memory backend for TTM-managed buffers: only the query
 * hooks (target/page/size) are implemented.
 */
static const struct nvkm_memory_func
nouveau_mem_memory = {
	.target = nouveau_mem_memory_target,
	.page = nouveau_mem_memory_page,
	.size = nouveau_mem_memory_size,
};
165
166 int
167 nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
168 struct ttm_mem_reg *reg)
169 {
170 struct nouveau_mem *mem;
171
172 if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
173 return -ENOMEM;
174 mem->cli = cli;
175 mem->kind = kind;
176 mem->comp = comp;
177 nvkm_memory_ctor(&nouveau_mem_memory, &mem->memory);
178
179 reg->mm_node = mem;
180 return 0;
181 }