/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "vmm.h"

#include <subdev/fb.h>

#include <nvif/if500d.h>
#include <nvif/if900d.h>

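/* A page-sized parent allocation that smaller page tables are carved out
 * of.  'shift' is log2 of the sub-allocation size, 'mask' has one bit per
 * slot in the parent, and 'free' marks which slots are currently unused.
 * A PTP sits on mmu->ptp.list for as long as it has free slots.
 */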
struct nvkm_mmu_ptp {
        struct nvkm_mmu_pt *pt;
        struct list_head head;
        u8  shift;
        u16 mask;
        u16 free;
};

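/* Release a sub-allocated page table back to its parent PTP, destroying
 * the parent itself once every slot has been returned.
 */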
static void
nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
{
        const int slot = pt->base >> pt->ptp->shift;
        struct nvkm_mmu_ptp *ptp = pt->ptp;

        /* If there were no free slots in the parent allocation before,
         * there will be now, so return the PTP to the cache.
         */
        if (!ptp->free)
                list_add(&ptp->head, &mmu->ptp.list);
        ptp->free |= BIT(slot);

        /* If there are no more sub-allocations, destroy the PTP. */
        if (ptp->free == ptp->mask) {
                nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
                list_del(&ptp->head);
                kfree(ptp);
        }

        kfree(pt);
}

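/* Sub-allocate a small page table from a PTP, allocating a fresh page-sized
 * parent via nvkm_mmu_ptc_get() when none currently has a free slot.
 * nvkm_mmu_ptc_get() takes mmu->ptp.mutex around this call.
 */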
struct nvkm_mmu_pt *
nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
{
        struct nvkm_mmu_pt *pt;
        struct nvkm_mmu_ptp *ptp;
        int slot;

        if (!(pt = kzalloc(sizeof(*pt), GFP_KERNEL)))
                return NULL;

        ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
        if (!ptp) {
                /* Need to allocate a new parent to sub-allocate from. */
                if (!(ptp = kmalloc(sizeof(*ptp), GFP_KERNEL))) {
                        kfree(pt);
                        return NULL;
                }

                ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
                if (!ptp->pt) {
                        kfree(ptp);
                        kfree(pt);
                        return NULL;
                }

                ptp->shift = order_base_2(size);
                slot = nvkm_memory_size(ptp->pt->memory) >> ptp->shift;
                ptp->mask = (1 << slot) - 1;
                ptp->free = ptp->mask;
                list_add(&ptp->head, &mmu->ptp.list);
        }
        pt->ptp = ptp;
        pt->sub = true;

        /* Sub-allocate from the parent object, removing the PTP from the
         * cache if there are no free slots left.
         */
        slot = __ffs(ptp->free);
        ptp->free &= ~BIT(slot);
        if (!ptp->free)
                list_del(&ptp->head);

        pt->memory = pt->ptp->pt->memory;
        pt->base = slot << ptp->shift;
        pt->addr = pt->ptp->pt->addr + pt->base;
        return pt;
}

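/* Cache of free page-table allocations of a single size.  One bucket exists
 * per size ever requested; 'item' holds the cached nvkm_mmu_pt objects and
 * 'refs' counts how many are currently sitting in the cache.
 */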
struct nvkm_mmu_ptc {
        struct list_head head;
        struct list_head item;
        u32 size;
        u32 refs;
};

static inline struct nvkm_mmu_ptc *
nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
{
        struct nvkm_mmu_ptc *ptc;

        list_for_each_entry(ptc, &mmu->ptc.list, head) {
                if (ptc->size == size)
                        return ptc;
        }

        ptc = kmalloc(sizeof(*ptc), GFP_KERNEL);
        if (ptc) {
                INIT_LIST_HEAD(&ptc->item);
                ptc->size = size;
                ptc->refs = 0;
                list_add(&ptc->head, &mmu->ptc.list);
        }

        return ptc;
}

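/* Release a page table: sub-allocations go back to their parent PTP, while
 * full-sized allocations are either returned to the size-specific cache or
 * freed outright when the cache already holds enough entries (or 'force'
 * is set).
 */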
void
nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt)
{
        struct nvkm_mmu_pt *pt = *ppt;
        if (pt) {
                /* Handle sub-allocated page tables. */
                if (pt->sub) {
                        mutex_lock(&mmu->ptp.mutex);
                        nvkm_mmu_ptp_put(mmu, force, pt);
                        mutex_unlock(&mmu->ptp.mutex);
                        return;
                }

                /* Either cache or free the object. */
                mutex_lock(&mmu->ptc.mutex);
                if (pt->ptc->refs < 8 /* Heuristic. */ && !force) {
                        list_add_tail(&pt->head, &pt->ptc->item);
                        pt->ptc->refs++;
                } else {
                        nvkm_memory_unref(&pt->memory);
                        kfree(pt);
                }
                mutex_unlock(&mmu->ptc.mutex);
        }
}

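/* Allocate backing memory for a page table of the given size/alignment,
 * preferring a cached object when one is available.  Alignments below
 * 0x1000 are handled by sub-allocating from a page-sized parent instead.
 *
 * Rough usage sketch (not from this file; shown only to illustrate the
 * expected pairing with nvkm_mmu_ptc_put()):
 *
 *      pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, true);
 *      if (pt) {
 *              ... write PTEs through pt->memory at offset pt->base ...
 *              nvkm_mmu_ptc_put(mmu, false, &pt);
 *      }
 */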
struct nvkm_mmu_pt *
nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
{
        struct nvkm_mmu_ptc *ptc;
        struct nvkm_mmu_pt *pt;
        int ret;

        /* Sub-allocated page table (i.e. GP100 LPT). */
        if (align < 0x1000) {
                mutex_lock(&mmu->ptp.mutex);
                pt = nvkm_mmu_ptp_get(mmu, align, zero);
                mutex_unlock(&mmu->ptp.mutex);
                return pt;
        }

        /* Look up the cache for this page table size. */
        mutex_lock(&mmu->ptc.mutex);
        ptc = nvkm_mmu_ptc_find(mmu, size);
        if (!ptc) {
                mutex_unlock(&mmu->ptc.mutex);
                return NULL;
        }

        /* If there's a free PT in the cache, reuse it. */
        pt = list_first_entry_or_null(&ptc->item, typeof(*pt), head);
        if (pt) {
                if (zero)
                        nvkm_fo64(pt->memory, 0, 0, size >> 3);
                list_del(&pt->head);
                ptc->refs--;
                mutex_unlock(&mmu->ptc.mutex);
                return pt;
        }
        mutex_unlock(&mmu->ptc.mutex);

        /* No such luck, we need to allocate. */
        if (!(pt = kmalloc(sizeof(*pt), GFP_KERNEL)))
                return NULL;
        pt->ptc = ptc;
        pt->sub = false;

        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
                              size, align, zero, &pt->memory);
        if (ret) {
                kfree(pt);
                return NULL;
        }

        pt->base = 0;
        pt->addr = nvkm_memory_addr(pt->memory);
        return pt;
}

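/* Legacy mapping helper: build the backend-specific argument structure for
 * the chip family, validate it via the VMM's valid() hook, program the PTEs,
 * and then move the memory/tag references over to the VMA's node.
 */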
static void
nvkm_vm_map_(const struct nvkm_vmm_page *page, struct nvkm_vma *vma, u64 delta,
             struct nvkm_mem *mem, nvkm_vmm_pte_func fn,
             struct nvkm_vmm_map *map)
{
        union {
                struct nv50_vmm_map_v0 nv50;
                struct gf100_vmm_map_v0 gf100;
        } args;
        struct nvkm_vmm *vmm = vma->vm;
        void *argv = NULL;
        u32 argc = 0;
        int ret;

        map->memory = mem->memory;
        map->page = page;

        if (vmm->func->valid) {
                switch (vmm->mmu->subdev.device->card_type) {
                case NV_50:
                        args.nv50.version = 0;
                        args.nv50.ro = !(vma->access & NV_MEM_ACCESS_WO);
                        args.nv50.priv = !!(vma->access & NV_MEM_ACCESS_SYS);
                        args.nv50.kind = (mem->memtype & 0x07f);
                        args.nv50.comp = (mem->memtype & 0x180) >> 7;
                        argv = &args.nv50;
                        argc = sizeof(args.nv50);
                        break;
                case NV_C0:
                case NV_E0:
                case GM100:
                case GP100: {
                        args.gf100.version = 0;
                        args.gf100.vol = (nvkm_memory_target(map->memory) !=
                                          NVKM_MEM_TARGET_VRAM);
                        args.gf100.ro = !(vma->access & NV_MEM_ACCESS_WO);
                        args.gf100.priv = !!(vma->access & NV_MEM_ACCESS_SYS);
                        args.gf100.kind = (mem->memtype & 0x0ff);
                        argv = &args.gf100;
                        argc = sizeof(args.gf100);
                }
                        break;
                default:
                        break;
                }

                ret = vmm->func->valid(vmm, argv, argc, map);
                if (WARN_ON(ret))
                        return;
        }

        mutex_lock(&vmm->mutex);
        nvkm_vmm_ptes_map(vmm, page, vma->node->addr + delta,
                          vma->node->size, map, fn);
        mutex_unlock(&vmm->mutex);

        nvkm_memory_tags_put(vma->node->memory, vmm->mmu->subdev.device,
                             &vma->node->tags);
        nvkm_memory_unref(&vma->node->memory);
        vma->node->memory = nvkm_memory_ref(map->memory);
        vma->node->tags = map->tags;
}

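/* Empty every per-size cache, releasing the backing memory of all cached
 * page tables.  The cache buckets themselves remain until
 * nvkm_mmu_ptc_fini().
 */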
void
nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
{
        struct nvkm_mmu_ptc *ptc;
        list_for_each_entry(ptc, &mmu->ptc.list, head) {
                struct nvkm_mmu_pt *pt, *tt;
                list_for_each_entry_safe(pt, tt, &ptc->item, head) {
                        nvkm_memory_unref(&pt->memory);
                        list_del(&pt->head);
                        kfree(pt);
                }
        }
}

static void
nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu)
{
        struct nvkm_mmu_ptc *ptc, *ptct;

        list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) {
                WARN_ON(!list_empty(&ptc->item));
                list_del(&ptc->head);
                kfree(ptc);
        }
}

static void
nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
{
        mutex_init(&mmu->ptc.mutex);
        INIT_LIST_HEAD(&mmu->ptc.list);
        mutex_init(&mmu->ptp.mutex);
        INIT_LIST_HEAD(&mmu->ptp.list);
}

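/* The nvkm_vm_* functions below are the older VM interface, kept as thin
 * wrappers around the newer nvkm_vmm_* code (note the "legacy" VMM name
 * used in nvkm_vm_new()).
 */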
void
nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
{
        const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
        if (page->desc->func->unmap) {
                struct nvkm_vmm_map map = { .mem = node->mem };
                nvkm_vm_map_(page, vma, delta, node, page->desc->func->mem, &map);
                return;
        }
}

static void
nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
                     struct nvkm_mem *mem)
{
        const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
        if (page->desc->func->unmap) {
                struct nvkm_vmm_map map = { .sgl = mem->sg->sgl };
                nvkm_vm_map_(page, vma, delta, mem, page->desc->func->sgl, &map);
                return;
        }
}

static void
nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
               struct nvkm_mem *mem)
{
        const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
        if (page->desc->func->unmap) {
                struct nvkm_vmm_map map = { .dma = mem->pages };
                nvkm_vm_map_(page, vma, delta, mem, page->desc->func->dma, &map);
                return;
        }
}

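/* Pick the mapping path based on how the memory is described: an sg table,
 * an array of DMA pages, or a contiguous VRAM node.
 */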
void
nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
{
        if (node->sg)
                nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
        else if (node->pages)
                nvkm_vm_map_sg(vma, 0, node->size << 12, node);
        else
                nvkm_vm_map_at(vma, 0, node);
}

void
nvkm_vm_unmap(struct nvkm_vma *vma)
{
        nvkm_vmm_unmap(vma->vm, vma->node);
}

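/* Allocate a range of address space from the VM, take a reference on the
 * VM, and fill in the legacy VMA fields (offset/addr, access).
 */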
int
nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
            struct nvkm_vma *vma)
{
        int ret;

        mutex_lock(&vm->mutex);
        ret = nvkm_vmm_get_locked(vm, true, false, false, page_shift, 0,
                                  size, &vma->node);
        mutex_unlock(&vm->mutex);
        if (ret)
                return ret;

        vma->memory = NULL;
        vma->tags = NULL;
        vma->vm = NULL;
        nvkm_vm_ref(vm, &vma->vm, NULL);
        vma->offset = vma->addr = vma->node->addr;
        vma->access = access;
        return 0;
}

void
nvkm_vm_put(struct nvkm_vma *vma)
{
        nvkm_vmm_put(vma->vm, &vma->node);
        nvkm_vm_ref(NULL, &vma->vm, NULL);
}

int
nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
{
        return nvkm_vmm_boot(vm);
}

int
nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
            struct lock_class_key *key, struct nvkm_vm **pvm)
{
        struct nvkm_mmu *mmu = device->mmu;

        *pvm = NULL;
        if (mmu->func->vmm.ctor) {
                int ret = mmu->func->vmm.ctor(mmu, mm_offset,
                                              offset + length - mm_offset,
                                              NULL, 0, key, "legacy", pvm);
                if (ret) {
                        nvkm_vm_ref(NULL, pvm, NULL);
                        return ret;
                }

                return ret;
        }

        return -EINVAL;
}

int
nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_memory *inst)
{
        if (ref) {
                if (inst) {
                        int ret = nvkm_vmm_join(ref, inst);
                        if (ret)
                                return ret;
                }

                nvkm_vmm_ref(ref);
        }

        if (*ptr) {
                nvkm_vmm_part(*ptr, inst);
                nvkm_vmm_unref(ptr);
        }

        *ptr = ref;
        return 0;
}

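/* One-time setup: instantiate the device-wide "gart" VMM for backends that
 * declare a global address space.
 */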
static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
        struct nvkm_mmu *mmu = nvkm_mmu(subdev);

        if (mmu->func->vmm.global) {
                int ret = nvkm_vmm_new(subdev->device, 0, 0, NULL, 0, NULL,
                                       "gart", &mmu->vmm);
                if (ret)
                        return ret;
        }

        return 0;
}

static int
nvkm_mmu_init(struct nvkm_subdev *subdev)
{
        struct nvkm_mmu *mmu = nvkm_mmu(subdev);
        if (mmu->func->init)
                mmu->func->init(mmu);
        return 0;
}

static void *
nvkm_mmu_dtor(struct nvkm_subdev *subdev)
{
        struct nvkm_mmu *mmu = nvkm_mmu(subdev);

        nvkm_vmm_unref(&mmu->vmm);

        nvkm_mmu_ptc_fini(mmu);
        return mmu;
}

static const struct nvkm_subdev_func
nvkm_mmu = {
        .dtor = nvkm_mmu_dtor,
        .oneinit = nvkm_mmu_oneinit,
        .init = nvkm_mmu_init,
};

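/* Common constructor shared by all MMU implementations: register the
 * subdev, copy the per-chipset limits from the function table, and set up
 * the page-table caches.  nvkm_mmu_new_() is the allocating variant.
 */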
void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
              int index, struct nvkm_mmu *mmu)
{
        nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
        mmu->func = func;
        mmu->limit = func->limit;
        mmu->dma_bits = func->dma_bits;
        mmu->lpg_shift = func->lpg_shift;
        nvkm_mmu_ptc_init(mmu);
}

int
nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
              int index, struct nvkm_mmu **pmmu)
{
        if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_mmu_ctor(func, device, index, *pmmu);
        return 0;
}