/* drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c */
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "vmm.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>

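/* Parent allocation that page tables smaller than the minimum 0x1000-byte
 * backing allocation are sub-allocated from; "mask" and "free" are bitmaps
 * tracking which fixed-size slots are currently available.
 */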
struct nvkm_mmu_ptp {
	struct nvkm_mmu_pt *pt;
	struct list_head head;
	u8  shift;
	u16 mask;
	u16 free;
};

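/* Return a sub-allocated page table to its parent (PTP), destroying the
 * parent once every one of its slots has been released.
 */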
static void
nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
{
	const int slot = pt->base >> pt->ptp->shift;
	struct nvkm_mmu_ptp *ptp = pt->ptp;

	/* If there were no free slots in the parent allocation before,
	 * there will be now, so return PTP to the cache.
	 */
	if (!ptp->free)
		list_add(&ptp->head, &mmu->ptp.list);
	ptp->free |= BIT(slot);

	/* If there are no sub-allocations left, destroy the PTP. */
	if (ptp->free == ptp->mask) {
		nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
		list_del(&ptp->head);
		kfree(ptp);
	}

	kfree(pt);
}

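/* Sub-allocate a small page table from a PTP, creating a fresh 0x1000-byte
 * parent allocation if none of the existing ones has a free slot.
 */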
struct nvkm_mmu_pt *
nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
{
	struct nvkm_mmu_pt *pt;
	struct nvkm_mmu_ptp *ptp;
	int slot;

	if (!(pt = kzalloc(sizeof(*pt), GFP_KERNEL)))
		return NULL;

	ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
	if (!ptp) {
		/* Need to allocate a new parent to sub-allocate from. */
		if (!(ptp = kmalloc(sizeof(*ptp), GFP_KERNEL))) {
			kfree(pt);
			return NULL;
		}

		ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
		if (!ptp->pt) {
			kfree(ptp);
			kfree(pt);
			return NULL;
		}

		ptp->shift = order_base_2(size);
		slot = nvkm_memory_size(ptp->pt->memory) >> ptp->shift;
		ptp->mask = (1 << slot) - 1;
		ptp->free = ptp->mask;
		list_add(&ptp->head, &mmu->ptp.list);
	}
	pt->ptp = ptp;
	pt->sub = true;

	/* Sub-allocate from the parent object, removing the PTP from the
	 * cache if there are no free slots left.
	 */
	slot = __ffs(ptp->free);
	ptp->free &= ~BIT(slot);
	if (!ptp->free)
		list_del(&ptp->head);

	pt->memory = pt->ptp->pt->memory;
	pt->base = slot << ptp->shift;
	pt->addr = pt->ptp->pt->addr + pt->base;
	return pt;
}

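/* Per-size cache of page table allocations, allowing freed page tables to
 * be reused without returning to the instance memory allocator.
 */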
struct nvkm_mmu_ptc {
	struct list_head head;
	struct list_head item;
	u32 size;
	u32 refs;
};

static inline struct nvkm_mmu_ptc *
nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
{
	struct nvkm_mmu_ptc *ptc;

	list_for_each_entry(ptc, &mmu->ptc.list, head) {
		if (ptc->size == size)
			return ptc;
	}

	ptc = kmalloc(sizeof(*ptc), GFP_KERNEL);
	if (ptc) {
		INIT_LIST_HEAD(&ptc->item);
		ptc->size = size;
		ptc->refs = 0;
		list_add(&ptc->head, &mmu->ptc.list);
	}

	return ptc;
}

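/* Release a page table, either returning it to its size-specific cache
 * (up to a small heuristic limit) or freeing it outright.
 */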
void
nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt)
{
	struct nvkm_mmu_pt *pt = *ppt;
	if (pt) {
		/* Handle sub-allocated page tables. */
		if (pt->sub) {
			mutex_lock(&mmu->ptp.mutex);
			nvkm_mmu_ptp_put(mmu, force, pt);
			mutex_unlock(&mmu->ptp.mutex);
			return;
		}

		/* Either cache or free the object. */
		mutex_lock(&mmu->ptc.mutex);
		if (pt->ptc->refs < 8 /* Heuristic. */ && !force) {
			list_add_tail(&pt->head, &pt->ptc->item);
			pt->ptc->refs++;
		} else {
			nvkm_memory_unref(&pt->memory);
			kfree(pt);
		}
		mutex_unlock(&mmu->ptc.mutex);
	}
}

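/* Allocate a page table: alignments below 0x1000 go through the PTP
 * sub-allocator, otherwise a cached table of the right size is reused if
 * available, and a new instance memory allocation is made as a last resort.
 */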
struct nvkm_mmu_pt *
nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
{
	struct nvkm_mmu_ptc *ptc;
	struct nvkm_mmu_pt *pt;
	int ret;

	/* Sub-allocated page table (i.e. GP100 LPT). */
	if (align < 0x1000) {
		mutex_lock(&mmu->ptp.mutex);
		pt = nvkm_mmu_ptp_get(mmu, align, zero);
		mutex_unlock(&mmu->ptp.mutex);
		return pt;
	}

	/* Look up the cache for this page table size. */
	mutex_lock(&mmu->ptc.mutex);
	ptc = nvkm_mmu_ptc_find(mmu, size);
	if (!ptc) {
		mutex_unlock(&mmu->ptc.mutex);
		return NULL;
	}

	/* If there's a free PT in the cache, reuse it. */
	pt = list_first_entry_or_null(&ptc->item, typeof(*pt), head);
	if (pt) {
		if (zero)
			nvkm_fo64(pt->memory, 0, 0, size >> 3);
		list_del(&pt->head);
		ptc->refs--;
		mutex_unlock(&mmu->ptc.mutex);
		return pt;
	}
	mutex_unlock(&mmu->ptc.mutex);

	/* No such luck, we need to allocate. */
	if (!(pt = kmalloc(sizeof(*pt), GFP_KERNEL)))
		return NULL;
	pt->ptc = ptc;
	pt->sub = false;

	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
			      size, align, zero, &pt->memory);
	if (ret) {
		kfree(pt);
		return NULL;
	}

	pt->base = 0;
	pt->addr = nvkm_memory_addr(pt->memory);
	return pt;
}

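/* Common helper for the new VMM backend paths below: validate the request
 * via the backend's ->valid hook (if any), write the PTEs, and move the
 * memory/tag references over to the VMA.
 */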
static void
nvkm_vm_map_(const struct nvkm_vmm_page *page, struct nvkm_vma *vma, u64 delta,
	     struct nvkm_mem *mem, nvkm_vmm_pte_func fn,
	     struct nvkm_vmm_map *map)
{
	struct nvkm_vmm *vmm = vma->vm;
	void *argv = NULL;
	u32 argc = 0;
	int ret;

	map->memory = mem->memory;
	map->page = page;

	if (vmm->func->valid) {
		ret = vmm->func->valid(vmm, argv, argc, map);
		if (WARN_ON(ret))
			return;
	}

	mutex_lock(&vmm->mutex);
	nvkm_vmm_ptes_map(vmm, page, ((u64)vma->node->offset << 12) + delta,
				     (u64)vma->node->length << 12, map, fn);
	mutex_unlock(&vmm->mutex);

	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
	nvkm_memory_unref(&vma->memory);
	vma->memory = nvkm_memory_ref(map->memory);
	vma->tags = map->tags;
}

void
nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
{
	struct nvkm_mmu_ptc *ptc;
	list_for_each_entry(ptc, &mmu->ptc.list, head) {
		struct nvkm_mmu_pt *pt, *tt;
		list_for_each_entry_safe(pt, tt, &ptc->item, head) {
			nvkm_memory_unref(&pt->memory);
			list_del(&pt->head);
			kfree(pt);
		}
	}
}

static void
nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu)
{
	struct nvkm_mmu_ptc *ptc, *ptct;

	list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) {
		WARN_ON(!list_empty(&ptc->item));
		list_del(&ptc->head);
		kfree(ptc);
	}
}

static void
nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
{
	mutex_init(&mmu->ptc.mutex);
	INIT_LIST_HEAD(&mmu->ptc.list);
	mutex_init(&mmu->ptp.mutex);
	INIT_LIST_HEAD(&mmu->ptp.list);
}

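/* Map a chain of nvkm_mm_node allocations into a VMA at byte offset "delta".
 * VMMs with the new backend (page->desc->func->unmap set) are routed
 * through nvkm_vm_map_(); otherwise PTEs are written via the legacy ->map
 * hook, one page table at a time.
 */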
void
nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
{
	const struct nvkm_vmm_page *page = vma->vm->func->page;
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_mm_node *r = node->mem;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	if (page->desc->func->unmap) {
		struct nvkm_vmm_map map = { .mem = node->mem };
		while (page->shift != vma->node->type)
			page++;
		nvkm_vm_map_(page, vma, delta, node, page->desc->func->mem, &map);
		return;
	}

	delta = 0;
	while (r) {
		u64 phys = (u64)r->offset << 12;
		u32 num = r->length >> bits;

		while (num) {
			struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			mmu->func->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				phys += len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
		r = r->next;
	}

	mmu->func->flush(vm);
}

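/* Map a scatter-gather table into a VMA, one page at a time per segment;
 * new-backend VMMs are routed through nvkm_vm_map_() instead.
 */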
static void
nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
		     struct nvkm_mem *mem)
{
	const struct nvkm_vmm_page *page = vma->vm->func->page;
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;
	u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (mmu->func->pgt_bits - bits);
	unsigned m, sglen;
	u32 end, len;
	int i;
	struct scatterlist *sg;

	if (page->desc->func->unmap) {
		struct nvkm_vmm_map map = { .sgl = mem->sg->sgl };
		while (page->shift != vma->node->type)
			page++;
		nvkm_vm_map_(page, vma, delta, mem, page->desc->func->sgl, &map);
		return;
	}

	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
		sglen = sg_dma_len(sg) >> PAGE_SHIFT;

		end = pte + sglen;
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

			mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
			num--;
			pte++;

			if (num == 0)
				goto finish;
		}
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
		if (m < sglen) {
			for (; m < sglen; m++) {
				dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

				mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
				num--;
				pte++;
				if (num == 0)
					goto finish;
			}
		}

	}
finish:
	mmu->func->flush(vm);
}

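/* Map an array of DMA page addresses into a VMA; new-backend VMMs are
 * routed through nvkm_vm_map_() instead.
 */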
static void
nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
	       struct nvkm_mem *mem)
{
	const struct nvkm_vmm_page *page = vma->vm->func->page;
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	dma_addr_t *list = mem->pages;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;
	u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	if (page->desc->func->unmap) {
		struct nvkm_vmm_map map = { .dma = mem->pages };
		while (page->shift != vma->node->type)
			page++;
		nvkm_vm_map_(page, vma, delta, mem, page->desc->func->dma, &map);
		return;
	}

	while (num) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		mmu->func->map_sg(vma, pgt, mem, pte, len, list);

		num -= len;
		pte += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	mmu->func->flush(vm);
}

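/* Map a whole nvkm_mem object, selecting the SG-table, page-list or
 * memory-node path based on how the memory is described.
 */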
void
nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
{
	if (node->sg)
		nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
	else
	if (node->pages)
		nvkm_vm_map_sg(vma, 0, node->size << 12, node);
	else
		nvkm_vm_map_at(vma, 0, node);
}

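/* Unmap a byte range of a VMA, either through the new VMM backend or by
 * clearing PTEs one legacy page table at a time.
 */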
void
nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;
	u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	if (vm->func->page->desc->func->unmap) {
		const struct nvkm_vmm_page *page = vm->func->page;
		while (page->shift != vma->node->type)
			page++;
		mutex_lock(&vm->mutex);
		nvkm_vmm_ptes_unmap(vm, page, (vma->node->offset << 12) + delta,
					       vma->node->length << 12, false);
		mutex_unlock(&vm->mutex);
		return;
	}

	while (num) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		mmu->func->unmap(vma, pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	mmu->func->flush(vm);
}

void
nvkm_vm_unmap(struct nvkm_vma *vma)
{
	nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);

	nvkm_memory_tags_put(vma->memory, vma->vm->mmu->subdev.device, &vma->tags);
	nvkm_memory_unref(&vma->memory);
}

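/* Drop page table references for a PDE range, unloading and freeing any
 * table whose refcount reaches zero.
 */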
static void
nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgt *vpgt;
	struct nvkm_memory *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->mem[big];
		vpgt->mem[big] = NULL;

		if (mmu->func->map_pgt)
			mmu->func->map_pgt(vm, pde, vpgt->mem);

		mmu->func->flush(vm);

		nvkm_memory_unref(&pgt);
	}
}

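/* Allocate and install the page table backing a single PDE for the given
 * page type (small or large pages).
 */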
static int
nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	int big = (type != mmu->func->spg_shift);
	u32 pgt_size;
	int ret;

	pgt_size = (1 << (mmu->func->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
			      pgt_size, 0x1000, true, &vpgt->mem[big]);
	if (unlikely(ret))
		return ret;

	if (mmu->func->map_pgt)
		mmu->func->map_pgt(vm, pde, vpgt->mem);

	vpgt->refcount[big]++;
	return 0;
}

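/* Allocate address space from a VM and take references on the page tables
 * covering it (via nvkm_vmm_ptes_get() on new-backend VMMs); the returned
 * VMA holds a reference on the VM.
 */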
int
nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
	    struct nvkm_vma *vma)
{
	struct nvkm_mmu *mmu = vm->mmu;
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mutex);
	ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
			   &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mutex);
		return ret;
	}

	if (vm->func->page->desc->func->unmap) {
		const struct nvkm_vmm_page *page = vm->func->page;
		while (page->shift != page_shift)
			page++;

		ret = nvkm_vmm_ptes_get(vm, page, vma->node->offset << 12,
					vma->node->length << 12);
		if (ret) {
			nvkm_mm_free(&vm->mm, &vma->node);
			mutex_unlock(&vm->mutex);
			return ret;
		}

		goto done;
	}

	fpde = (vma->node->offset >> mmu->func->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;

	for (pde = fpde; pde <= lpde; pde++) {
		struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != mmu->func->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			if (pde != fpde)
				nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nvkm_mm_free(&vm->mm, &vma->node);
			mutex_unlock(&vm->mutex);
			return ret;
		}
	}
done:
	mutex_unlock(&vm->mutex);

	vma->memory = NULL;
	vma->tags = NULL;
	vma->vm = NULL;
	nvkm_vm_ref(vm, &vma->vm, NULL);
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}

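/* Release a VMA: drop its memory/tag references, release its page table
 * references, return the address range to the allocator, and drop its VM
 * reference.
 */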
void
nvkm_vm_put(struct nvkm_vma *vma)
{
	struct nvkm_mmu *mmu;
	struct nvkm_vm *vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	vm = vma->vm;
	mmu = vm->mmu;

	nvkm_memory_tags_put(vma->memory, mmu->subdev.device, &vma->tags);
	nvkm_memory_unref(&vma->memory);

	fpde = (vma->node->offset >> mmu->func->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;

	mutex_lock(&vm->mutex);
	if (vm->func->page->desc->func->unmap) {
		const struct nvkm_vmm_page *page = vm->func->page;
		while (page->shift != vma->node->type)
			page++;

		nvkm_vmm_ptes_put(vm, page, vma->node->offset << 12,
				  vma->node->length << 12);
		goto done;
	}

	nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
done:
	nvkm_mm_free(&vm->mm, &vma->node);
	mutex_unlock(&vm->mutex);

	nvkm_vm_ref(NULL, &vma->vm, NULL);
}

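/* Bootstrap a VM: new-backend VMMs are handed off to nvkm_vmm_boot(),
 * while legacy VMs get a single small-page page table covering "size"
 * bytes, booted via nvkm_memory_boot().
 */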
int
nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_memory *pgt;
	int ret;

	if (vm->func->page->desc->func->unmap)
		return nvkm_vmm_boot(vm);

	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
			      (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
	if (ret == 0) {
		vm->pgt[0].refcount[0] = 1;
		vm->pgt[0].mem[0] = pgt;
		nvkm_memory_boot(pgt, vm);
		vm->bootstrapped = true;
	}

	return ret;
}

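/* Set up the legacy bookkeeping (refcount, PDE array, address-space
 * allocator) on a VM constructed by the new VMM backend.
 */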
static int
nvkm_vm_legacy(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
	       u32 block, struct nvkm_vm *vm)
{
	u64 mm_length = (offset + length) - mm_offset;
	int ret;

	kref_init(&vm->refcount);
	vm->fpde = offset >> (mmu->func->pgt_bits + 12);
	vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);

	vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	if (block > length)
		block = length;

	ret = nvkm_mm_init(&vm->mm, 0, mm_offset >> 12, mm_length >> 12,
			   block >> 12);
	if (ret) {
		vfree(vm->pgt);
		return ret;
	}

	return 0;
}

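/* Create a "legacy" VM through the new VMM constructor, then layer the
 * legacy page table bookkeeping on top of it.
 */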
int
nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
	    struct lock_class_key *key, struct nvkm_vm **pvm)
{
	struct nvkm_mmu *mmu = device->mmu;

	*pvm = NULL;
	if (mmu->func->vmm.ctor) {
		int ret = mmu->func->vmm.ctor(mmu, mm_offset,
					      offset + length - mm_offset,
					      NULL, 0, key, "legacy", pvm);
		if (ret) {
			nvkm_vm_ref(NULL, pvm, NULL);
			return ret;
		}

		ret = nvkm_vm_legacy(mmu, offset, length, mm_offset,
				     (*pvm)->func->page_block ?
				     (*pvm)->func->page_block : 4096, *pvm);
		if (ret)
			nvkm_vm_ref(NULL, pvm, NULL);

		return ret;
	}

	return -EINVAL;
}

static void
nvkm_vm_del(struct kref *kref)
{
	struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);

	nvkm_mm_fini(&vm->mm);
	vfree(vm->pgt);
	if (vm->func)
		nvkm_vmm_dtor(vm);
	kfree(vm);
}

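/* Reference-counting helper: take a reference on "ref" (joining it to the
 * instance block, if given), drop the reference currently held in *ptr
 * (parting it from the instance block first), and store "ref" there.
 */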
int
nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_memory *inst)
{
	if (ref) {
		if (ref->func->join && inst) {
			int ret = ref->func->join(ref, inst), i;
			if (ret)
				return ret;

			if (!ref->func->page->desc->func->unmap && ref->mmu->func->map_pgt) {
				for (i = ref->fpde; i <= ref->lpde; i++)
					ref->mmu->func->map_pgt(ref, i, ref->pgt[i - ref->fpde].mem);
			}
		}

		kref_get(&ref->refcount);
	}

	if (*ptr) {
		if ((*ptr)->func->part && inst)
			(*ptr)->func->part(*ptr, inst);
		if ((*ptr)->bootstrapped && inst) {
			if (!(*ptr)->func->page->desc->func->unmap) {
				nvkm_memory_unref(&(*ptr)->pgt[0].mem[0]);
				(*ptr)->bootstrapped = false;
			}
		}
		kref_put(&(*ptr)->refcount, nvkm_vm_del);
	}

	*ptr = ref;
	return 0;
}

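/* One-time subdev init: create the global VM if this MMU uses one, then
 * run any implementation-specific oneinit hook.
 */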
static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);

	if (mmu->func->vmm.global) {
		int ret = nvkm_vm_new(subdev->device, 0, mmu->limit, 0,
				      NULL, &mmu->vmm);
		if (ret)
			return ret;
	}

	if (mmu->func->oneinit)
		return mmu->func->oneinit(mmu);

	return 0;
}

static int
nvkm_mmu_init(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
	if (mmu->func->init)
		mmu->func->init(mmu);
	return 0;
}

static void *
nvkm_mmu_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);

	nvkm_vm_ref(NULL, &mmu->vmm, NULL);

	nvkm_mmu_ptc_fini(mmu);
	return mmu;
}

static const struct nvkm_subdev_func
nvkm_mmu = {
	.dtor = nvkm_mmu_dtor,
	.oneinit = nvkm_mmu_oneinit,
	.init = nvkm_mmu_init,
};

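/* Common constructor shared by all MMU implementations: copies the
 * per-implementation limits and initializes the page table caches.
 */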
void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu *mmu)
{
	nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
	mmu->func = func;
	mmu->limit = func->limit;
	mmu->dma_bits = func->dma_bits;
	mmu->lpg_shift = func->lpg_shift;
	nvkm_mmu_ptc_init(mmu);
}

int
nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu **pmmu)
{
	if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_mmu_ctor(func, device, index, *pmmu);
	return 0;
}