mirror_ubuntu-bionic-kernel.git: drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
(blob at commit "drm/nouveau/mmu/nv44: implement vmm on top of new base")
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "vmm.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>

struct nvkm_mmu_ptp {
        struct nvkm_mmu_pt *pt;
        struct list_head head;
        u8 shift;
        u16 mask;
        u16 free;
};

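/* Release a sub-allocated page table back to its parent (PTP) allocation.
 * Re-lists the PTP as having free slots if it was previously full, and
 * destroys the PTP entirely once every slot has been returned.
 */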
static void
nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
{
        const int slot = pt->base >> pt->ptp->shift;
        struct nvkm_mmu_ptp *ptp = pt->ptp;

        /* If there were no free slots in the parent allocation before,
         * there will be now, so return PTP to the cache.
         */
        if (!ptp->free)
                list_add(&ptp->head, &mmu->ptp.list);
        ptp->free |= BIT(slot);

        /* If there are no more sub-allocations, destroy PTP. */
        if (ptp->free == ptp->mask) {
                nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
                list_del(&ptp->head);
                kfree(ptp);
        }

        kfree(pt);
}

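/* Allocate a page table smaller than the minimum 0x1000-byte backing
 * allocation by sub-allocating a slot from a shared parent (PTP) object,
 * creating a new parent if none with free slots exists.
 */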
struct nvkm_mmu_pt *
nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
{
        struct nvkm_mmu_pt *pt;
        struct nvkm_mmu_ptp *ptp;
        int slot;

        if (!(pt = kzalloc(sizeof(*pt), GFP_KERNEL)))
                return NULL;

        ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
        if (!ptp) {
                /* Need to allocate a new parent to sub-allocate from. */
                if (!(ptp = kmalloc(sizeof(*ptp), GFP_KERNEL))) {
                        kfree(pt);
                        return NULL;
                }

                ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
                if (!ptp->pt) {
                        kfree(ptp);
                        kfree(pt);
                        return NULL;
                }

                ptp->shift = order_base_2(size);
                slot = nvkm_memory_size(ptp->pt->memory) >> ptp->shift;
                ptp->mask = (1 << slot) - 1;
                ptp->free = ptp->mask;
                list_add(&ptp->head, &mmu->ptp.list);
        }
        pt->ptp = ptp;
        pt->sub = true;

        /* Sub-allocate from parent object, removing PTP from cache
         * if there are no free slots left.
         */
        slot = __ffs(ptp->free);
        ptp->free &= ~BIT(slot);
        if (!ptp->free)
                list_del(&ptp->head);

        pt->memory = pt->ptp->pt->memory;
        pt->base = slot << ptp->shift;
        pt->addr = pt->ptp->pt->addr + pt->base;
        return pt;
}

struct nvkm_mmu_ptc {
        struct list_head head;
        struct list_head item;
        u32 size;
        u32 refs;
};

static inline struct nvkm_mmu_ptc *
nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
{
        struct nvkm_mmu_ptc *ptc;

        list_for_each_entry(ptc, &mmu->ptc.list, head) {
                if (ptc->size == size)
                        return ptc;
        }

        ptc = kmalloc(sizeof(*ptc), GFP_KERNEL);
        if (ptc) {
                INIT_LIST_HEAD(&ptc->item);
                ptc->size = size;
                ptc->refs = 0;
                list_add(&ptc->head, &mmu->ptc.list);
        }

        return ptc;
}

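/* Release a page-table allocation.  Sub-allocations are handed back to the
 * PTP code; full allocations are returned to the size-bucketed cache unless
 * it already holds eight entries or "force" is set, in which case the
 * backing memory is freed.
 */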
void
nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt)
{
        struct nvkm_mmu_pt *pt = *ppt;
        if (pt) {
                /* Handle sub-allocated page tables. */
                if (pt->sub) {
                        mutex_lock(&mmu->ptp.mutex);
                        nvkm_mmu_ptp_put(mmu, force, pt);
                        mutex_unlock(&mmu->ptp.mutex);
                        return;
                }

                /* Either cache or free the object. */
                mutex_lock(&mmu->ptc.mutex);
                if (pt->ptc->refs < 8 /* Heuristic. */ && !force) {
                        list_add_tail(&pt->head, &pt->ptc->item);
                        pt->ptc->refs++;
                } else {
                        nvkm_memory_unref(&pt->memory);
                        kfree(pt);
                }
                mutex_unlock(&mmu->ptc.mutex);
        }
}

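/* Allocate a page table, preferring to reuse a cached object of the same
 * size.  Requests with an alignment below 0x1000 bytes are routed to the
 * PTP sub-allocator instead.
 */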
struct nvkm_mmu_pt *
nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
{
        struct nvkm_mmu_ptc *ptc;
        struct nvkm_mmu_pt *pt;
        int ret;

        /* Sub-allocated page table (i.e. GP100 LPT). */
        if (align < 0x1000) {
                mutex_lock(&mmu->ptp.mutex);
                pt = nvkm_mmu_ptp_get(mmu, align, zero);
                mutex_unlock(&mmu->ptp.mutex);
                return pt;
        }

        /* Lookup cache for this page table size. */
        mutex_lock(&mmu->ptc.mutex);
        ptc = nvkm_mmu_ptc_find(mmu, size);
        if (!ptc) {
                mutex_unlock(&mmu->ptc.mutex);
                return NULL;
        }

        /* If there's a free PT in the cache, reuse it. */
        pt = list_first_entry_or_null(&ptc->item, typeof(*pt), head);
        if (pt) {
                if (zero)
                        nvkm_fo64(pt->memory, 0, 0, size >> 3);
                list_del(&pt->head);
                ptc->refs--;
                mutex_unlock(&mmu->ptc.mutex);
                return pt;
        }
        mutex_unlock(&mmu->ptc.mutex);

        /* No such luck, we need to allocate. */
        if (!(pt = kmalloc(sizeof(*pt), GFP_KERNEL)))
                return NULL;
        pt->ptc = ptc;
        pt->sub = false;

        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
                              size, align, zero, &pt->memory);
        if (ret) {
                kfree(pt);
                return NULL;
        }

        pt->base = 0;
        pt->addr = nvkm_memory_addr(pt->memory);
        return pt;
}

void
nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
{
        struct nvkm_mmu_ptc *ptc;
        list_for_each_entry(ptc, &mmu->ptc.list, head) {
                struct nvkm_mmu_pt *pt, *tt;
                list_for_each_entry_safe(pt, tt, &ptc->item, head) {
                        nvkm_memory_unref(&pt->memory);
                        list_del(&pt->head);
                        kfree(pt);
                }
        }
}

static void
nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu)
{
        struct nvkm_mmu_ptc *ptc, *ptct;

        list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) {
                WARN_ON(!list_empty(&ptc->item));
                list_del(&ptc->head);
                kfree(ptc);
        }
}

static void
nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
{
        mutex_init(&mmu->ptc.mutex);
        INIT_LIST_HEAD(&mmu->ptc.list);
        mutex_init(&mmu->ptp.mutex);
        INIT_LIST_HEAD(&mmu->ptp.list);
}

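/* Map memory described by a list of nvkm_mm_node entries (node->mem) into
 * the address range described by "vma", splitting the work wherever a
 * mapping crosses a page-table (PDE) boundary, then flushing the MMU.
 */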
void
nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
{
        struct nvkm_vm *vm = vma->vm;
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_mm_node *r = node->mem;
        int big = vma->node->type != mmu->func->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
        u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
        u32 max = 1 << (mmu->func->pgt_bits - bits);
        u32 end, len;

        delta = 0;
        while (r) {
                u64 phys = (u64)r->offset << 12;
                u32 num = r->length >> bits;

                while (num) {
                        struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

                        end = (pte + num);
                        if (unlikely(end >= max))
                                end = max;
                        len = end - pte;

                        mmu->func->map(vma, pgt, node, pte, len, phys, delta);

                        num -= len;
                        pte += len;
                        if (unlikely(end >= max)) {
                                phys += len << (bits + 12);
                                pde++;
                                pte = 0;
                        }

                        delta += (u64)len << vma->node->type;
                }
                r = r->next;
        }

        mmu->func->flush(vm);
}

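/* Map system memory described by a scatter-gather table, one PAGE_SIZE page
 * at a time, splitting each scatterlist entry across page-table boundaries
 * where necessary.
 */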
static void
nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
                     struct nvkm_mem *mem)
{
        struct nvkm_vm *vm = vma->vm;
        struct nvkm_mmu *mmu = vm->mmu;
        int big = vma->node->type != mmu->func->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num = length >> vma->node->type;
        u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
        u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
        u32 max = 1 << (mmu->func->pgt_bits - bits);
        unsigned m, sglen;
        u32 end, len;
        int i;
        struct scatterlist *sg;

        for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
                struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
                sglen = sg_dma_len(sg) >> PAGE_SHIFT;

                end = pte + sglen;
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                for (m = 0; m < len; m++) {
                        dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

                        mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
                        num--;
                        pte++;

                        if (num == 0)
                                goto finish;
                }
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
                if (m < sglen) {
                        for (; m < sglen; m++) {
                                dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

                                mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
                                num--;
                                pte++;
                                if (num == 0)
                                        goto finish;
                        }
                }

        }
finish:
        mmu->func->flush(vm);
}

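/* Map system memory described by a flat DMA address array (mem->pages),
 * batching as many PTEs as fit in the current page table before moving on
 * to the next PDE.
 */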
static void
nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
               struct nvkm_mem *mem)
{
        struct nvkm_vm *vm = vma->vm;
        struct nvkm_mmu *mmu = vm->mmu;
        dma_addr_t *list = mem->pages;
        int big = vma->node->type != mmu->func->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num = length >> vma->node->type;
        u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
        u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
        u32 max = 1 << (mmu->func->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                mmu->func->map_sg(vma, pgt, mem, pte, len, list);

                num -= len;
                pte += len;
                list += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        mmu->func->flush(vm);
}

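/* Map an entire nvkm_mem object, selecting the appropriate backend based on
 * whether it is described by an sg table, a DMA page array, or a list of
 * memory nodes.
 */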
void
nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
{
        if (node->sg)
                nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
        else
        if (node->pages)
                nvkm_vm_map_sg(vma, 0, node->size << 12, node);
        else
                nvkm_vm_map_at(vma, 0, node);
}

void
nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
{
        struct nvkm_vm *vm = vma->vm;
        struct nvkm_mmu *mmu = vm->mmu;
        int big = vma->node->type != mmu->func->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num = length >> vma->node->type;
        u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
        u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
        u32 max = 1 << (mmu->func->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                mmu->func->unmap(vma, pgt, pte, len);

                num -= len;
                pte += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        mmu->func->flush(vm);
}

void
nvkm_vm_unmap(struct nvkm_vma *vma)
{
        nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

static void
nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
{
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_vm_pgd *vpgd;
        struct nvkm_vm_pgt *vpgt;
        struct nvkm_memory *pgt;
        u32 pde;

        for (pde = fpde; pde <= lpde; pde++) {
                vpgt = &vm->pgt[pde - vm->fpde];
                if (--vpgt->refcount[big])
                        continue;

                pgt = vpgt->mem[big];
                vpgt->mem[big] = NULL;

                list_for_each_entry(vpgd, &vm->pgd_list, head) {
                        mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
                }

                mmu->func->flush(vm);

                nvkm_memory_unref(&pgt);
        }
}

static int
nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
{
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
        struct nvkm_vm_pgd *vpgd;
        int big = (type != mmu->func->spg_shift);
        u32 pgt_size;
        int ret;

        pgt_size  = (1 << (mmu->func->pgt_bits + 12)) >> type;
        pgt_size *= 8;

        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
                              pgt_size, 0x1000, true, &vpgt->mem[big]);
        if (unlikely(ret))
                return ret;

        list_for_each_entry(vpgd, &vm->pgd_list, head) {
                mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
        }

        vpgt->refcount[big]++;
        return 0;
}

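/* Allocate a block of virtual address space, allocating (or referencing)
 * the page tables that back it and taking a reference on the VM.  On
 * failure, any page tables taken for this allocation are released again.
 */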
int
nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
            struct nvkm_vma *vma)
{
        struct nvkm_mmu *mmu = vm->mmu;
        u32 align = (1 << page_shift) >> 12;
        u32 msize = size >> 12;
        u32 fpde, lpde, pde;
        int ret;

        mutex_lock(&vm->mutex);
        ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
                           &vma->node);
        if (unlikely(ret != 0)) {
                mutex_unlock(&vm->mutex);
                return ret;
        }

        fpde = (vma->node->offset >> mmu->func->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;

        for (pde = fpde; pde <= lpde; pde++) {
                struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
                int big = (vma->node->type != mmu->func->spg_shift);

                if (likely(vpgt->refcount[big])) {
                        vpgt->refcount[big]++;
                        continue;
                }

                ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
                if (ret) {
                        if (pde != fpde)
                                nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
                        nvkm_mm_free(&vm->mm, &vma->node);
                        mutex_unlock(&vm->mutex);
                        return ret;
                }
        }
        mutex_unlock(&vm->mutex);

        vma->vm = NULL;
        nvkm_vm_ref(vm, &vma->vm, NULL);
        vma->offset = (u64)vma->node->offset << 12;
        vma->access = access;
        return 0;
}

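/* Release a virtual address space allocation made by nvkm_vm_get(),
 * dropping the page-table references it held and the VM reference taken
 * on its behalf.
 */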
void
nvkm_vm_put(struct nvkm_vma *vma)
{
        struct nvkm_mmu *mmu;
        struct nvkm_vm *vm;
        u32 fpde, lpde;

        if (unlikely(vma->node == NULL))
                return;
        vm = vma->vm;
        mmu = vm->mmu;

        fpde = (vma->node->offset >> mmu->func->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;

        mutex_lock(&vm->mutex);
        nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
        nvkm_mm_free(&vm->mm, &vma->node);
        mutex_unlock(&vm->mutex);

        nvkm_vm_ref(NULL, &vma->vm, NULL);
}

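/* Pre-allocate the first small-page page table covering "size" bytes,
 * boot-strap it via nvkm_memory_boot(), and mark the VM as bootstrapped.
 */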
int
nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
{
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_memory *pgt;
        int ret;

        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
                              (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
        if (ret == 0) {
                vm->pgt[0].refcount[0] = 1;
                vm->pgt[0].mem[0] = pgt;
                nvkm_memory_boot(pgt, vm);
                vm->bootstrapped = true;
        }

        return ret;
}

static int
nvkm_vm_legacy(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
               u32 block, struct nvkm_vm *vm)
{
        u64 mm_length = (offset + length) - mm_offset;
        int ret;

        INIT_LIST_HEAD(&vm->pgd_list);
        kref_init(&vm->refcount);
        vm->fpde = offset >> (mmu->func->pgt_bits + 12);
        vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);

        /* Leave freeing "vm" to the callers, which already handle the
         * error path; freeing it here as well would double-free it.
         */
        vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
        if (!vm->pgt)
                return -ENOMEM;

        if (block > length)
                block = length;

        ret = nvkm_mm_init(&vm->mm, 0, mm_offset >> 12, mm_length >> 12,
                           block >> 12);
        if (ret) {
                vfree(vm->pgt);
                return ret;
        }

        return 0;
}

int
nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
               u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
{
        static struct lock_class_key _key;
        struct nvkm_vm *vm;
        int ret;

        vm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm)
                return -ENOMEM;

        __mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
        vm->mmu = mmu;

        ret = nvkm_vm_legacy(mmu, offset, length, mm_offset, block, vm);
        if (ret) {
                kfree(vm);
                return ret;
        }

        *pvm = vm;
        return 0;
}

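/* Create an address space for the device.  If the MMU implements the new
 * VMM interface, a VMM is constructed and the legacy bookkeeping is layered
 * on top of it; otherwise the implementation-specific create() hook is used.
 */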
int
nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
            struct lock_class_key *key, struct nvkm_vm **pvm)
{
        struct nvkm_mmu *mmu = device->mmu;

        *pvm = NULL;
        if (mmu->func->vmm.ctor) {
                int ret = mmu->func->vmm.ctor(mmu, mm_offset,
                                              offset + length - mm_offset,
                                              NULL, 0, key, "legacy", pvm);
                if (ret) {
                        nvkm_vm_ref(NULL, pvm, NULL);
                        return ret;
                }

                ret = nvkm_vm_legacy(mmu, offset, length, mm_offset,
                                     (*pvm)->func->page_block ?
                                     (*pvm)->func->page_block : 4096, *pvm);
                if (ret)
                        nvkm_vm_ref(NULL, pvm, NULL);

                return ret;
        }

        if (!mmu->func->create)
                return -EINVAL;

        return mmu->func->create(mmu, offset, length, mm_offset, key, pvm);
}

static int
nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
{
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_vm_pgd *vpgd;
        int i;

        if (!pgd)
                return 0;

        vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
        if (!vpgd)
                return -ENOMEM;

        vpgd->obj = pgd;

        mutex_lock(&vm->mutex);
        for (i = vm->fpde; i <= vm->lpde; i++)
                mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
        list_add(&vpgd->head, &vm->pgd_list);
        mutex_unlock(&vm->mutex);
        return 0;
}

static void
nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
{
        struct nvkm_vm_pgd *vpgd, *tmp;

        if (!mpgd)
                return;

        mutex_lock(&vm->mutex);
        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                if (vpgd->obj == mpgd) {
                        list_del(&vpgd->head);
                        kfree(vpgd);
                        break;
                }
        }
        mutex_unlock(&vm->mutex);
}

static void
nvkm_vm_del(struct kref *kref)
{
        struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
        struct nvkm_vm_pgd *vpgd, *tmp;

        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                nvkm_vm_unlink(vm, vpgd->obj);
        }

        nvkm_mm_fini(&vm->mm);
        vfree(vm->pgt);

        if (vm->func)
                nvkm_vmm_dtor(vm);
        kfree(vm);
}

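/* Update *ptr to reference "ref" (either may be NULL), linking/unlinking
 * the given page directory and adjusting VM reference counts accordingly.
 */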
int
nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
{
        if (ref) {
                int ret = nvkm_vm_link(ref, pgd);
                if (ret)
                        return ret;

                kref_get(&ref->refcount);
        }

        if (*ptr) {
                if ((*ptr)->bootstrapped && pgd)
                        nvkm_memory_unref(&(*ptr)->pgt[0].mem[0]);
                nvkm_vm_unlink(*ptr, pgd);
                kref_put(&(*ptr)->refcount, nvkm_vm_del);
        }

        *ptr = ref;
        return 0;
}

static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
        struct nvkm_mmu *mmu = nvkm_mmu(subdev);

        if (mmu->func->vmm.global) {
                int ret = nvkm_vm_new(subdev->device, 0, mmu->limit, 0,
                                      NULL, &mmu->vmm);
                if (ret)
                        return ret;
        }

        if (mmu->func->oneinit)
                return mmu->func->oneinit(mmu);

        return 0;
}

static int
nvkm_mmu_init(struct nvkm_subdev *subdev)
{
        struct nvkm_mmu *mmu = nvkm_mmu(subdev);
        if (mmu->func->init)
                mmu->func->init(mmu);
        return 0;
}

static void *
nvkm_mmu_dtor(struct nvkm_subdev *subdev)
{
        struct nvkm_mmu *mmu = nvkm_mmu(subdev);

        nvkm_vm_ref(NULL, &mmu->vmm, NULL);

        nvkm_mmu_ptc_fini(mmu);
        return mmu;
}

static const struct nvkm_subdev_func
nvkm_mmu = {
        .dtor = nvkm_mmu_dtor,
        .oneinit = nvkm_mmu_oneinit,
        .init = nvkm_mmu_init,
};

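/* Common constructor for MMU subdev implementations: registers the subdev,
 * copies implementation limits from the function table, and initialises the
 * page-table caches.
 */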
void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
              int index, struct nvkm_mmu *mmu)
{
        nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
        mmu->func = func;
        mmu->limit = func->limit;
        mmu->dma_bits = func->dma_bits;
        mmu->lpg_shift = func->lpg_shift;
        nvkm_mmu_ptc_init(mmu);
}

int
nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
              int index, struct nvkm_mmu **pmmu)
{
        if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_mmu_ctor(func, device, index, *pmmu);
        return 0;
}