mirror_ubuntu-bionic-kernel.git: drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
drm/nouveau/mmu: remove support for old backends
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "vmm.h"

#include <subdev/fb.h>

#include <nvif/if500d.h>
#include <nvif/if900d.h>

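/* Parent object that small page tables are sub-allocated from: a single
 * 0x1000-byte allocation carved into (1 << shift)-byte slots, with 'free'
 * tracking which slots are currently unused.
 */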
struct nvkm_mmu_ptp {
	struct nvkm_mmu_pt *pt;
	struct list_head head;
	u8  shift;
	u16 mask;
	u16 free;
};

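/* Return a sub-allocated page table slot to its parent, destroying the
 * parent object once all of its slots have been released.
 */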
static void
nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
{
	const int slot = pt->base >> pt->ptp->shift;
	struct nvkm_mmu_ptp *ptp = pt->ptp;

	/* If there were no free slots in the parent allocation before,
	 * there will be now, so return PTP to the cache.
	 */
	if (!ptp->free)
		list_add(&ptp->head, &mmu->ptp.list);
	ptp->free |= BIT(slot);

	/* If there are no more sub-allocations, destroy PTP. */
	if (ptp->free == ptp->mask) {
		nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
		list_del(&ptp->head);
		kfree(ptp);
	}

	kfree(pt);
}

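/* Sub-allocate a small page table from a cached parent object, creating
 * a new parent if none with a free slot exists.
 */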
struct nvkm_mmu_pt *
nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
{
	struct nvkm_mmu_pt *pt;
	struct nvkm_mmu_ptp *ptp;
	int slot;

	if (!(pt = kzalloc(sizeof(*pt), GFP_KERNEL)))
		return NULL;

	ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
	if (!ptp) {
		/* Need to allocate a new parent to sub-allocate from. */
		if (!(ptp = kmalloc(sizeof(*ptp), GFP_KERNEL))) {
			kfree(pt);
			return NULL;
		}

		ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
		if (!ptp->pt) {
			kfree(ptp);
			kfree(pt);
			return NULL;
		}

		ptp->shift = order_base_2(size);
		slot = nvkm_memory_size(ptp->pt->memory) >> ptp->shift;
		ptp->mask = (1 << slot) - 1;
		ptp->free = ptp->mask;
		list_add(&ptp->head, &mmu->ptp.list);
	}
	pt->ptp = ptp;
	pt->sub = true;

	/* Sub-allocate from parent object, removing PTP from cache
	 * if there are no free slots left.
	 */
	slot = __ffs(ptp->free);
	ptp->free &= ~BIT(slot);
	if (!ptp->free)
		list_del(&ptp->head);

	pt->memory = pt->ptp->pt->memory;
	pt->base = slot << ptp->shift;
	pt->addr = pt->ptp->pt->addr + pt->base;
	return pt;
}

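/* Cache of recently-freed page tables of a single size, kept so their
 * backing memory can be reused by later allocations.
 */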
struct nvkm_mmu_ptc {
	struct list_head head;
	struct list_head item;
	u32 size;
	u32 refs;
};

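/* Look up the cache for a given page table size, creating it on first use. */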
static inline struct nvkm_mmu_ptc *
nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
{
	struct nvkm_mmu_ptc *ptc;

	list_for_each_entry(ptc, &mmu->ptc.list, head) {
		if (ptc->size == size)
			return ptc;
	}

	ptc = kmalloc(sizeof(*ptc), GFP_KERNEL);
	if (ptc) {
		INIT_LIST_HEAD(&ptc->item);
		ptc->size = size;
		ptc->refs = 0;
		list_add(&ptc->head, &mmu->ptc.list);
	}

	return ptc;
}

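/* Release a page table: sub-allocations are returned to their parent,
 * everything else is either cached for reuse or freed outright.
 */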
void
nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt)
{
	struct nvkm_mmu_pt *pt = *ppt;
	if (pt) {
		/* Handle sub-allocated page tables. */
		if (pt->sub) {
			mutex_lock(&mmu->ptp.mutex);
			nvkm_mmu_ptp_put(mmu, force, pt);
			mutex_unlock(&mmu->ptp.mutex);
			return;
		}

		/* Either cache or free the object. */
		mutex_lock(&mmu->ptc.mutex);
		if (pt->ptc->refs < 8 /* Heuristic. */ && !force) {
			list_add_tail(&pt->head, &pt->ptc->item);
			pt->ptc->refs++;
		} else {
			nvkm_memory_unref(&pt->memory);
			kfree(pt);
		}
		mutex_unlock(&mmu->ptc.mutex);
	}
}

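/* Allocate a page table, preferring to reuse a cached one of the same size. */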
struct nvkm_mmu_pt *
nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
{
	struct nvkm_mmu_ptc *ptc;
	struct nvkm_mmu_pt *pt;
	int ret;

	/* Sub-allocated page table (i.e. GP100 LPT). */
	if (align < 0x1000) {
		mutex_lock(&mmu->ptp.mutex);
		pt = nvkm_mmu_ptp_get(mmu, align, zero);
		mutex_unlock(&mmu->ptp.mutex);
		return pt;
	}

	/* Lookup cache for this page table size. */
	mutex_lock(&mmu->ptc.mutex);
	ptc = nvkm_mmu_ptc_find(mmu, size);
	if (!ptc) {
		mutex_unlock(&mmu->ptc.mutex);
		return NULL;
	}

	/* If there's a free PT in the cache, reuse it. */
	pt = list_first_entry_or_null(&ptc->item, typeof(*pt), head);
	if (pt) {
		if (zero)
			nvkm_fo64(pt->memory, 0, 0, size >> 3);
		list_del(&pt->head);
		ptc->refs--;
		mutex_unlock(&mmu->ptc.mutex);
		return pt;
	}
	mutex_unlock(&mmu->ptc.mutex);

	/* No such luck, we need to allocate. */
	if (!(pt = kmalloc(sizeof(*pt), GFP_KERNEL)))
		return NULL;
	pt->ptc = ptc;
	pt->sub = false;

	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
			      size, align, zero, &pt->memory);
	if (ret) {
		kfree(pt);
		return NULL;
	}

	pt->base = 0;
	pt->addr = nvkm_memory_addr(pt->memory);
	return pt;
}

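/* Common backend for the legacy map paths: build the per-generation
 * 'valid' arguments, then have the VMM backend write the PTEs.
 */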
static void
nvkm_vm_map_(const struct nvkm_vmm_page *page, struct nvkm_vma *vma, u64 delta,
	     struct nvkm_mem *mem, nvkm_vmm_pte_func fn,
	     struct nvkm_vmm_map *map)
{
	union {
		struct nv50_vmm_map_v0 nv50;
		struct gf100_vmm_map_v0 gf100;
	} args;
	struct nvkm_vmm *vmm = vma->vm;
	void *argv = NULL;
	u32 argc = 0;
	int ret;

	map->memory = mem->memory;
	map->page = page;

	if (vmm->func->valid) {
		switch (vmm->mmu->subdev.device->card_type) {
		case NV_50:
			args.nv50.version = 0;
			args.nv50.ro = !(vma->access & NV_MEM_ACCESS_WO);
			args.nv50.priv = !!(vma->access & NV_MEM_ACCESS_SYS);
			args.nv50.kind = (mem->memtype & 0x07f);
			args.nv50.comp = (mem->memtype & 0x180) >> 7;
			argv = &args.nv50;
			argc = sizeof(args.nv50);
			break;
		case NV_C0:
		case NV_E0:
		case GM100:
		case GP100: {
			args.gf100.version = 0;
			args.gf100.vol = (nvkm_memory_target(map->memory) != NVKM_MEM_TARGET_VRAM);
			args.gf100.ro = !(vma->access & NV_MEM_ACCESS_WO);
			args.gf100.priv = !!(vma->access & NV_MEM_ACCESS_SYS);
			args.gf100.kind = (mem->memtype & 0x0ff);
			argv = &args.gf100;
			argc = sizeof(args.gf100);
		}
			break;
		default:
			break;
		}

		ret = vmm->func->valid(vmm, argv, argc, map);
		if (WARN_ON(ret))
			return;
	}

	mutex_lock(&vmm->mutex);
	nvkm_vmm_ptes_map(vmm, page, ((u64)vma->node->offset << 12) + delta,
			  (u64)vma->node->length << 12, map, fn);
	mutex_unlock(&vmm->mutex);

	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
	nvkm_memory_unref(&vma->memory);
	vma->memory = nvkm_memory_ref(map->memory);
	vma->tags = map->tags;
}

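/* Free every cached page table and its backing memory. */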
void
nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
{
	struct nvkm_mmu_ptc *ptc;
	list_for_each_entry(ptc, &mmu->ptc.list, head) {
		struct nvkm_mmu_pt *pt, *tt;
		list_for_each_entry_safe(pt, tt, &ptc->item, head) {
			nvkm_memory_unref(&pt->memory);
			list_del(&pt->head);
			kfree(pt);
		}
	}
}

static void
nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu)
{
	struct nvkm_mmu_ptc *ptc, *ptct;

	list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) {
		WARN_ON(!list_empty(&ptc->item));
		list_del(&ptc->head);
		kfree(ptc);
	}
}

static void
nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
{
	mutex_init(&mmu->ptc.mutex);
	INIT_LIST_HEAD(&mmu->ptc.list);
	mutex_init(&mmu->ptp.mutex);
	INIT_LIST_HEAD(&mmu->ptp.list);
}

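/* Legacy interface: map the nvkm_mm_node list backing 'node' into a VMA,
 * starting 'delta' bytes into the mapping.
 */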
void
nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
{
	const struct nvkm_vmm_page *page = vma->vm->func->page;
	if (page->desc->func->unmap) {
		struct nvkm_vmm_map map = { .mem = node->mem };
		while (page->shift != vma->node->type)
			page++;
		nvkm_vm_map_(page, vma, delta, node, page->desc->func->mem, &map);
		return;
	}
}

static void
nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
		     struct nvkm_mem *mem)
{
	const struct nvkm_vmm_page *page = vma->vm->func->page;
	if (page->desc->func->unmap) {
		struct nvkm_vmm_map map = { .sgl = mem->sg->sgl };
		while (page->shift != vma->node->type)
			page++;
		nvkm_vm_map_(page, vma, delta, mem, page->desc->func->sgl, &map);
		return;
	}
}

static void
nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
	       struct nvkm_mem *mem)
{
	const struct nvkm_vmm_page *page = vma->vm->func->page;
	if (page->desc->func->unmap) {
		struct nvkm_vmm_map map = { .dma = mem->pages };
		while (page->shift != vma->node->type)
			page++;
		nvkm_vm_map_(page, vma, delta, mem, page->desc->func->dma, &map);
		return;
	}
}

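/* Map an entire nvkm_mem into a VMA, choosing the scatterlist, DMA page
 * array, or nvkm_mm_node path based on how the memory is described.
 */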
void
nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
{
	if (node->sg)
		nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
	else
	if (node->pages)
		nvkm_vm_map_sg(vma, 0, node->size << 12, node);
	else
		nvkm_vm_map_at(vma, 0, node);
}

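/* Legacy interface: unmap 'length' bytes starting 'delta' bytes into a VMA. */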
void
nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
{
	struct nvkm_vm *vm = vma->vm;
	if (vm->func->page->desc->func->unmap) {
		const struct nvkm_vmm_page *page = vm->func->page;
		while (page->shift != vma->node->type)
			page++;
		mutex_lock(&vm->mutex);
		nvkm_vmm_ptes_unmap(vm, page, (vma->node->offset << 12) + delta,
				    vma->node->length << 12, false);
		mutex_unlock(&vm->mutex);
		return;
	}
}

void
nvkm_vm_unmap(struct nvkm_vma *vma)
{
	nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);

	nvkm_memory_tags_put(vma->memory, vma->vm->mmu->subdev.device, &vma->tags);
	nvkm_memory_unref(&vma->memory);
}

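/* Allocate address space for a mapping of 'size' bytes at the requested
 * page size, and take references on the page tables covering it.
 */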
int
nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
	    struct nvkm_vma *vma)
{
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	int ret;

	mutex_lock(&vm->mutex);
	ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
			   &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mutex);
		return ret;
	}

	if (vm->func->page->desc->func->unmap) {
		const struct nvkm_vmm_page *page = vm->func->page;
		while (page->shift != page_shift)
			page++;

		ret = nvkm_vmm_ptes_get(vm, page, vma->node->offset << 12,
					vma->node->length << 12);
		if (ret) {
			nvkm_mm_free(&vm->mm, &vma->node);
			mutex_unlock(&vm->mutex);
			return ret;
		}
	}
	mutex_unlock(&vm->mutex);

	vma->memory = NULL;
	vma->tags = NULL;
	vma->vm = NULL;
	nvkm_vm_ref(vm, &vma->vm, NULL);
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}

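/* Tear down a VMA: drop its memory tags, release its page tables, and
 * return the address range to the allocator.
 */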
void
nvkm_vm_put(struct nvkm_vma *vma)
{
	struct nvkm_mmu *mmu;
	struct nvkm_vm *vm;

	if (unlikely(vma->node == NULL))
		return;
	vm = vma->vm;
	mmu = vm->mmu;

	nvkm_memory_tags_put(vma->memory, mmu->subdev.device, &vma->tags);
	nvkm_memory_unref(&vma->memory);

	mutex_lock(&vm->mutex);
	if (vm->func->page->desc->func->unmap) {
		const struct nvkm_vmm_page *page = vm->func->page;
		while (page->shift != vma->node->type)
			page++;

		nvkm_vmm_ptes_put(vm, page, vma->node->offset << 12,
				  vma->node->length << 12);
	}

	nvkm_mm_free(&vm->mm, &vma->node);
	mutex_unlock(&vm->mutex);

	nvkm_vm_ref(NULL, &vma->vm, NULL);
}

int
nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
{
	return nvkm_vmm_boot(vm);
}

static int
nvkm_vm_legacy(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
	       u32 block, struct nvkm_vm *vm)
{
	u64 mm_length = (offset + length) - mm_offset;
	int ret;

	kref_init(&vm->refcount);

	if (block > length)
		block = length;

	ret = nvkm_mm_init(&vm->mm, 0, mm_offset >> 12, mm_length >> 12,
			   block >> 12);
	if (ret)
		return ret;

	return 0;
}

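/* Create a "legacy" VM on top of the new VMM backend, with an address-space
 * allocator covering the requested range.
 */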
int
nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
	    struct lock_class_key *key, struct nvkm_vm **pvm)
{
	struct nvkm_mmu *mmu = device->mmu;

	*pvm = NULL;
	if (mmu->func->vmm.ctor) {
		int ret = mmu->func->vmm.ctor(mmu, mm_offset,
					      offset + length - mm_offset,
					      NULL, 0, key, "legacy", pvm);
		if (ret) {
			nvkm_vm_ref(NULL, pvm, NULL);
			return ret;
		}

		ret = nvkm_vm_legacy(mmu, offset, length, mm_offset,
				     (*pvm)->func->page_block ?
				     (*pvm)->func->page_block : 4096, *pvm);
		if (ret)
			nvkm_vm_ref(NULL, pvm, NULL);

		return ret;
	}

	return -EINVAL;
}

static void
nvkm_vm_del(struct kref *kref)
{
	struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);

	nvkm_mm_fini(&vm->mm);
	if (vm->func)
		nvkm_vmm_dtor(vm);
	kfree(vm);
}

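/* Point '*ptr' at 'ref', joining/leaving 'inst' as required and dropping
 * the previously-referenced VM (freeing it on its final reference).
 */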
int
nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_memory *inst)
{
	if (ref) {
		if (ref->func->join && inst) {
			int ret = ref->func->join(ref, inst);
			if (ret)
				return ret;
		}

		kref_get(&ref->refcount);
	}

	if (*ptr) {
		if ((*ptr)->func->part && inst)
			(*ptr)->func->part(*ptr, inst);
		kref_put(&(*ptr)->refcount, nvkm_vm_del);
	}

	*ptr = ref;
	return 0;
}

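/* One-time subdev setup: create the MMU-wide VM for backends that expose
 * a single global address space.
 */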
static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);

	if (mmu->func->vmm.global) {
		int ret = nvkm_vm_new(subdev->device, 0, mmu->limit, 0,
				      NULL, &mmu->vmm);
		if (ret)
			return ret;
	}

	return 0;
}

static int
nvkm_mmu_init(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
	if (mmu->func->init)
		mmu->func->init(mmu);
	return 0;
}

static void *
nvkm_mmu_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);

	nvkm_vm_ref(NULL, &mmu->vmm, NULL);

	nvkm_mmu_ptc_fini(mmu);
	return mmu;
}

static const struct nvkm_subdev_func
nvkm_mmu = {
	.dtor = nvkm_mmu_dtor,
	.oneinit = nvkm_mmu_oneinit,
	.init = nvkm_mmu_init,
};

void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu *mmu)
{
	nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
	mmu->func = func;
	mmu->limit = func->limit;
	mmu->dma_bits = func->dma_bits;
	mmu->lpg_shift = func->lpg_shift;
	nvkm_mmu_ptc_init(mmu);
}

int
nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu **pmmu)
{
	if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_mmu_ctor(func, device, index, *pmmu);
	return 0;
}