/* drivers/gpu/drm/nouveau/nouveau_ttm.c
 * (mirror_ubuntu-bionic-kernel.git, blob at commit "drm/nouveau: allocate vram with nvkm_ram_get()")
 */
/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"

#include <drm/drm_legacy.h>

#include <core/tegra.h>

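/*
 * Memory-type manager hooks shared by the VRAM and GART managers below.
 * init/takedown are deliberate no-ops: there is no per-manager state to
 * set up, since all of the real work happens in the per-node
 * get_node/put_node hooks.
 */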
static int
nouveau_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
{
	nouveau_mem_del(reg);
}

static void
nouveau_manager_debug(struct ttm_mem_type_manager *man,
		      struct drm_printer *printer)
{
}

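/*
 * Grab the nvkm_memory pointer before nouveau_mem_del() frees the node it
 * lives in, then drop the reference on the backing VRAM afterwards.
 */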
static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *reg)
{
	struct nvkm_memory *memory = nouveau_mem(reg)->_mem->memory;
	nouveau_mem_del(reg);
	nvkm_memory_unref(&memory);
}

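/*
 * Allocate a VRAM node.  Returning 0 with reg->mm_node == NULL tells TTM
 * that no space was available, so it can try eviction or another placement
 * rather than failing the allocation outright; hard errors are returned
 * as-is.
 */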
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *reg)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_drm *drm = nvbo->cli->drm;
	struct nouveau_mem *mem;
	int ret;

	if (drm->client.device.info.ram_size == 0)
		return -ENOMEM;

	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
	mem = nouveau_mem(reg);
	if (ret)
		return ret;

	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
	if (ret) {
		nouveau_mem_del(reg);
		if (ret == -ENOSPC) {
			reg->mm_node = NULL;
			return 0;
		}
		return ret;
	}

	return 0;
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	.init = nouveau_manager_init,
	.takedown = nouveau_manager_fini,
	.get_node = nouveau_vram_manager_new,
	.put_node = nouveau_vram_manager_del,
	.debug = nouveau_manager_debug,
};

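/*
 * GART nodes only need the nouveau_mem container here; reg->start is left
 * at zero, as the GPU virtual address is assigned later, when the buffer
 * is bound/mapped.
 */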
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *reg)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_drm *drm = nvbo->cli->drm;
	struct nouveau_mem *mem;
	int ret;

	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
	mem = nouveau_mem(reg);
	if (ret)
		return ret;

	mem->_mem = &mem->__mem;
	reg->start = 0;
	return 0;
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	.init = nouveau_manager_init,
	.takedown = nouveau_manager_fini,
	.get_node = nouveau_gart_manager_new,
	.put_node = nouveau_manager_del,
	.debug = nouveau_manager_debug
};

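/*
 * Pre-NV50 GART path: reserve a GPU virtual address range up front with
 * nvkm_vm_get() (reg->num_pages << 12 converts the page count to bytes,
 * i.e. 4 KiB pages) and report where it landed via reg->start.  The
 * -ENOSPC handling mirrors the convention used by the VRAM manager above.
 */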
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
		      struct ttm_mem_reg *reg)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_drm *drm = nvbo->cli->drm;
	struct nouveau_mem *mem;
	struct nvkm_mmu *mmu = nvxx_mmu(&drm->client.device);
	int ret;

	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
	mem = nouveau_mem(reg);
	if (ret)
		return ret;

	ret = nvkm_vm_get(mmu->vmm, reg->num_pages << 12, 12,
			  NV_MEM_ACCESS_RW, &mem->vma[0]);
	if (ret) {
		nouveau_mem_del(reg);
		if (ret == -ENOSPC) {
			reg->mm_node = NULL;
			return 0;
		}
		return ret;
	}

	mem->_mem = &mem->__mem;
	reg->start = mem->vma[0].addr >> PAGE_SHIFT;
	return 0;
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	.init = nouveau_manager_init,
	.takedown = nouveau_manager_fini,
	.get_node = nv04_gart_manager_new,
	.put_node = nouveau_manager_del,
	.debug = nouveau_manager_debug
};

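/*
 * mmap() entry point: offsets below DRM_FILE_PAGE_OFFSET belong to the
 * legacy DRM map space, everything at or above it is a TTM buffer object.
 */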
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_legacy_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

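/* Thin wrappers matching the drm_global_reference init/release hooks. */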
static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

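/*
 * Take references on the TTM memory-accounting and BO globals, which
 * drm_global_item_ref() shares (and refcounts) across all DRM devices in
 * the system.  mem_global_ref.release doubles as the "initialised" flag
 * checked by nouveau_ttm_global_release().
 */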
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}

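/*
 * Set up TTM for this device: pick a DMA mask (falling back to 32 bits if
 * the preferred mask is rejected), initialise the BO driver, then size the
 * VRAM and GART ranges and map the VRAM aperture (PCI resource 1, i.e.
 * BAR1) write-combined.
 */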
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nvkm_pci *pci = device->pci;
	struct drm_device *dev = drm->dev;
	u8 bits;
	int ret;

	if (pci && pci->agp.bridge) {
		drm->agp.bridge = pci->agp.bridge;
		drm->agp.base = pci->agp.base;
		drm->agp.size = pci->agp.size;
		drm->agp.cma = pci->agp.cma;
	}

	bits = nvxx_mmu(&drm->client.device)->dma_bits;
	if (nvxx_device(&drm->client.device)->func->pci) {
		if (drm->agp.bridge)
			bits = 32;
	} else if (device->func->tegra) {
		struct nvkm_device_tegra *tegra = device->func->tegra(device);

		/*
		 * If the platform can use an IOMMU, then the addressable DMA
		 * space is constrained by the IOMMU bit.
		 */
		if (tegra->func->iommu_bit)
			bits = min(bits, tegra->func->iommu_bit);
	}

	ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
	if (ret && bits != 32) {
		bits = 32;
		ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
	}
	if (ret)
		return ret;

	ret = dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(bits));
	if (ret)
		dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(32));

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				 drm->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 bits <= 32);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->client.device.info.ram_user;

	arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
				   device->func->resource_size(device, 1));

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			     drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
					 device->func->resource_size(device, 1));

	/* GART init */
	if (!drm->agp.bridge) {
		drm->gem.gart_available = nvxx_mmu(&drm->client.device)->limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			     drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}

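/*
 * Tear down in reverse order of nouveau_ttm_init(): drain the VRAM and
 * GART ranges, release the BO device and the TTM globals, then undo the
 * write-combine mapping of the VRAM aperture.
 */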
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->client.device);

	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
	arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
				device->func->resource_size(device, 1));
}