/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>

struct nvkm_mmu_ptp {
        struct nvkm_mmu_pt *pt;
        struct list_head head;
        u8  shift;
        u16 mask;
        u16 free;
};

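/* Page-table sub-allocation ("PTP"): small page tables are carved out of a
 * 0x1000-byte parent allocation in power-of-two slots, with ptp->free
 * tracking which slots are still available.
 */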
static void
nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
{
        const int slot = pt->base >> pt->ptp->shift;
        struct nvkm_mmu_ptp *ptp = pt->ptp;

        /* If there were no free slots in the parent allocation before,
         * there will be now, so return PTP to the cache.
         */
        if (!ptp->free)
                list_add(&ptp->head, &mmu->ptp.list);
        ptp->free |= BIT(slot);

        /* If there are no more sub-allocations, destroy PTP. */
        if (ptp->free == ptp->mask) {
                nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
                list_del(&ptp->head);
                kfree(ptp);
        }

        kfree(pt);
}

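/* Take the first free slot from a cached parent allocation, creating a new
 * parent page table via nvkm_mmu_ptc_get() if none has space left.
 */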
struct nvkm_mmu_pt *
nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
{
        struct nvkm_mmu_pt *pt;
        struct nvkm_mmu_ptp *ptp;
        int slot;

        if (!(pt = kzalloc(sizeof(*pt), GFP_KERNEL)))
                return NULL;

        ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
        if (!ptp) {
                /* Need to allocate a new parent to sub-allocate from. */
                if (!(ptp = kmalloc(sizeof(*ptp), GFP_KERNEL))) {
                        kfree(pt);
                        return NULL;
                }

                ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
                if (!ptp->pt) {
                        kfree(ptp);
                        kfree(pt);
                        return NULL;
                }

                ptp->shift = order_base_2(size);
                slot = nvkm_memory_size(ptp->pt->memory) >> ptp->shift;
                ptp->mask = (1 << slot) - 1;
                ptp->free = ptp->mask;
                list_add(&ptp->head, &mmu->ptp.list);
        }
        pt->ptp = ptp;
        pt->sub = true;

        /* Sub-allocate from parent object, removing PTP from cache
         * if there are no more free slots left.
         */
        slot = __ffs(ptp->free);
        ptp->free &= ~BIT(slot);
        if (!ptp->free)
                list_del(&ptp->head);

        pt->memory = pt->ptp->pt->memory;
        pt->base = slot << ptp->shift;
        pt->addr = pt->ptp->pt->addr + pt->base;
        return pt;
}

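/* Page-table cache ("PTC"): one bucket per page-table size, each holding
 * recently released allocations for quick reuse.
 */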
struct nvkm_mmu_ptc {
        struct list_head head;
        struct list_head item;
        u32 size;
        u32 refs;
};

static inline struct nvkm_mmu_ptc *
nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
{
        struct nvkm_mmu_ptc *ptc;

        list_for_each_entry(ptc, &mmu->ptc.list, head) {
                if (ptc->size == size)
                        return ptc;
        }

        ptc = kmalloc(sizeof(*ptc), GFP_KERNEL);
        if (ptc) {
                INIT_LIST_HEAD(&ptc->item);
                ptc->size = size;
                ptc->refs = 0;
                list_add(&ptc->head, &mmu->ptc.list);
        }

        return ptc;
}

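/* Release a page table: sub-allocations go back to their parent, full
 * allocations are either cached on their size bucket or freed outright.
 */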
void
nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt)
{
        struct nvkm_mmu_pt *pt = *ppt;
        if (pt) {
                /* Handle sub-allocated page tables. */
                if (pt->sub) {
                        mutex_lock(&mmu->ptp.mutex);
                        nvkm_mmu_ptp_put(mmu, force, pt);
                        mutex_unlock(&mmu->ptp.mutex);
                        return;
                }

                /* Either cache or free the object. */
                mutex_lock(&mmu->ptc.mutex);
                if (pt->ptc->refs < 8 /* Heuristic. */ && !force) {
                        list_add_tail(&pt->head, &pt->ptc->item);
                        pt->ptc->refs++;
                } else {
                        nvkm_memory_unref(&pt->memory);
                        kfree(pt);
                }
                mutex_unlock(&mmu->ptc.mutex);
        }
}

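/* Allocate a page table, preferring a cached entry of the right size and
 * falling back to a fresh nvkm_memory allocation.
 */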
struct nvkm_mmu_pt *
nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
{
        struct nvkm_mmu_ptc *ptc;
        struct nvkm_mmu_pt *pt;
        int ret;

        /* Sub-allocated page table (i.e. GP100 LPT). */
        if (align < 0x1000) {
                mutex_lock(&mmu->ptp.mutex);
                pt = nvkm_mmu_ptp_get(mmu, align, zero);
                mutex_unlock(&mmu->ptp.mutex);
                return pt;
        }

        /* Lookup cache for this page table size. */
        mutex_lock(&mmu->ptc.mutex);
        ptc = nvkm_mmu_ptc_find(mmu, size);
        if (!ptc) {
                mutex_unlock(&mmu->ptc.mutex);
                return NULL;
        }

        /* If there's a free PT in the cache, reuse it. */
        pt = list_first_entry_or_null(&ptc->item, typeof(*pt), head);
        if (pt) {
                if (zero)
                        nvkm_fo64(pt->memory, 0, 0, size >> 3);
                list_del(&pt->head);
                ptc->refs--;
                mutex_unlock(&mmu->ptc.mutex);
                return pt;
        }
        mutex_unlock(&mmu->ptc.mutex);

        /* No such luck, we need to allocate. */
        if (!(pt = kmalloc(sizeof(*pt), GFP_KERNEL)))
                return NULL;
        pt->ptc = ptc;
        pt->sub = false;

        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
                              size, align, zero, &pt->memory);
        if (ret) {
                kfree(pt);
                return NULL;
        }

        pt->base = 0;
        pt->addr = nvkm_memory_addr(pt->memory);
        return pt;
}

void
nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
{
        struct nvkm_mmu_ptc *ptc;
        list_for_each_entry(ptc, &mmu->ptc.list, head) {
                struct nvkm_mmu_pt *pt, *tt;
                list_for_each_entry_safe(pt, tt, &ptc->item, head) {
                        nvkm_memory_unref(&pt->memory);
                        list_del(&pt->head);
                        kfree(pt);
                }
        }
}

static void
nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu)
{
        struct nvkm_mmu_ptc *ptc, *ptct;

        list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) {
                WARN_ON(!list_empty(&ptc->item));
                list_del(&ptc->head);
                kfree(ptc);
        }
}

static void
nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
{
        mutex_init(&mmu->ptc.mutex);
        INIT_LIST_HEAD(&mmu->ptc.list);
        mutex_init(&mmu->ptp.mutex);
        INIT_LIST_HEAD(&mmu->ptp.list);
}

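/* Map an nvkm_mem node's memory ranges into the address space.  The virtual
 * address is split into a page directory index (pde) and page table index
 * (pte) using mmu->func->pgt_bits; "bits" converts between 4KiB units and
 * the VMA's page size.
 */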
void
nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
{
        struct nvkm_vm *vm = vma->vm;
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_mm_node *r = node->mem;
        int big = vma->node->type != mmu->func->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (mmu->func->pgt_bits - bits);
        u32 end, len;

        delta = 0;
        while (r) {
                u64 phys = (u64)r->offset << 12;
                u32 num  = r->length >> bits;

                while (num) {
                        struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

                        end = (pte + num);
                        if (unlikely(end >= max))
                                end = max;
                        len = end - pte;

                        mmu->func->map(vma, pgt, node, pte, len, phys, delta);

                        num -= len;
                        pte += len;
                        if (unlikely(end >= max)) {
                                phys += len << (bits + 12);
                                pde++;
                                pte = 0;
                        }

                        delta += (u64)len << vma->node->type;
                }
                r = r->next;
        }

        mmu->func->flush(vm);
}

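/* Map a scatter-gather table one page at a time, walking each sg segment and
 * crossing into the next page table whenever pte reaches max.
 */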
static void
nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
                     struct nvkm_mem *mem)
{
        struct nvkm_vm *vm = vma->vm;
        struct nvkm_mmu *mmu = vm->mmu;
        int big = vma->node->type != mmu->func->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (mmu->func->pgt_bits - bits);
        unsigned m, sglen;
        u32 end, len;
        int i;
        struct scatterlist *sg;

        for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
                struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
                sglen = sg_dma_len(sg) >> PAGE_SHIFT;

                end = pte + sglen;
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                for (m = 0; m < len; m++) {
                        dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

                        mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
                        num--;
                        pte++;

                        if (num == 0)
                                goto finish;
                }
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
                if (m < sglen) {
                        for (; m < sglen; m++) {
                                dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

                                mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
                                num--;
                                pte++;
                                if (num == 0)
                                        goto finish;
                        }
                }
        }
finish:
        mmu->func->flush(vm);
}

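/* Map a flat array of DMA page addresses (mem->pages), passing whole runs of
 * PTEs to the backend at once.
 */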
static void
nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
               struct nvkm_mem *mem)
{
        struct nvkm_vm *vm = vma->vm;
        struct nvkm_mmu *mmu = vm->mmu;
        dma_addr_t *list = mem->pages;
        int big = vma->node->type != mmu->func->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (mmu->func->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                mmu->func->map_sg(vma, pgt, mem, pte, len, list);

                num  -= len;
                pte  += len;
                list += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        mmu->func->flush(vm);
}

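/* Dispatch to the appropriate mapping path based on how the memory is
 * described (sg table, page array, or nvkm_mm nodes).
 */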
void
nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
{
        if (node->sg)
                nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
        else
        if (node->pages)
                nvkm_vm_map_sg(vma, 0, node->size << 12, node);
        else
                nvkm_vm_map_at(vma, 0, node);
}

static void
nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
{
        struct nvkm_vm *vm = vma->vm;
        struct nvkm_mmu *mmu = vm->mmu;
        int big = vma->node->type != mmu->func->spg_shift;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (mmu->func->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                mmu->func->unmap(vma, pgt, pte, len);

                num -= len;
                pte += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        mmu->func->flush(vm);
}

void
nvkm_vm_unmap(struct nvkm_vma *vma)
{
        nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

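/* Drop references on the page tables covering [fpde,lpde]; when a table's
 * refcount hits zero, unhook it from every page directory and free it.
 */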
static void
nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
{
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_vm_pgd *vpgd;
        struct nvkm_vm_pgt *vpgt;
        struct nvkm_memory *pgt;
        u32 pde;

        for (pde = fpde; pde <= lpde; pde++) {
                vpgt = &vm->pgt[pde - vm->fpde];
                if (--vpgt->refcount[big])
                        continue;

                pgt = vpgt->mem[big];
                vpgt->mem[big] = NULL;

                list_for_each_entry(vpgd, &vm->pgd_list, head) {
                        mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
                }

                mmu->func->flush(vm);

                nvkm_memory_unref(&pgt);
        }
}

static int
nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
{
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
        struct nvkm_vm_pgd *vpgd;
        int big = (type != mmu->func->spg_shift);
        u32 pgt_size;
        int ret;

        pgt_size  = (1 << (mmu->func->pgt_bits + 12)) >> type;
        pgt_size *= 8;

        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
                              pgt_size, 0x1000, true, &vpgt->mem[big]);
        if (unlikely(ret))
                return ret;

        list_for_each_entry(vpgd, &vm->pgd_list, head) {
                mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
        }

        vpgt->refcount[big]++;
        return 0;
}

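/* Allocate address space for a VMA and make sure page tables exist for every
 * PDE it touches, allocating and hooking up new tables as required.
 */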
int
nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
            struct nvkm_vma *vma)
{
        struct nvkm_mmu *mmu = vm->mmu;
        u32 align = (1 << page_shift) >> 12;
        u32 msize = size >> 12;
        u32 fpde, lpde, pde;
        int ret;

        mutex_lock(&vm->mutex);
        ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
                           &vma->node);
        if (unlikely(ret != 0)) {
                mutex_unlock(&vm->mutex);
                return ret;
        }

        fpde = (vma->node->offset >> mmu->func->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;

        for (pde = fpde; pde <= lpde; pde++) {
                struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
                int big = (vma->node->type != mmu->func->spg_shift);

                if (likely(vpgt->refcount[big])) {
                        vpgt->refcount[big]++;
                        continue;
                }

                ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
                if (ret) {
                        if (pde != fpde)
                                nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
                        nvkm_mm_free(&vm->mm, &vma->node);
                        mutex_unlock(&vm->mutex);
                        return ret;
                }
        }
        mutex_unlock(&vm->mutex);

        vma->vm = NULL;
        nvkm_vm_ref(vm, &vma->vm, NULL);
        vma->offset = (u64)vma->node->offset << 12;
        vma->access = access;
        return 0;
}

void
nvkm_vm_put(struct nvkm_vma *vma)
{
        struct nvkm_mmu *mmu;
        struct nvkm_vm *vm;
        u32 fpde, lpde;

        if (unlikely(vma->node == NULL))
                return;
        vm = vma->vm;
        mmu = vm->mmu;

        fpde = (vma->node->offset >> mmu->func->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;

        mutex_lock(&vm->mutex);
        nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
        nvkm_mm_free(&vm->mm, &vma->node);
        mutex_unlock(&vm->mutex);

        nvkm_vm_ref(NULL, &vma->vm, NULL);
}

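/* Allocate and boot a single small-page page table covering the first "size"
 * bytes of the VM, marking the VM as bootstrapped.
 */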
int
nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
{
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_memory *pgt;
        int ret;

        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
                              (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
        if (ret == 0) {
                vm->pgt[0].refcount[0] = 1;
                vm->pgt[0].mem[0] = pgt;
                nvkm_memory_boot(pgt, vm);
                vm->bootstrapped = true;
        }

        return ret;
}

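/* Legacy VM construction: size the page-table array from pgt_bits and set up
 * the address-space allocator for the requested range.
 */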
static int
nvkm_vm_legacy(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
               u32 block, struct nvkm_vm *vm)
{
        u64 mm_length = (offset + length) - mm_offset;
        int ret;

        INIT_LIST_HEAD(&vm->pgd_list);
        kref_init(&vm->refcount);
        vm->fpde = offset >> (mmu->func->pgt_bits + 12);
        vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);

        vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
        if (!vm->pgt)
                return -ENOMEM;

        ret = nvkm_mm_init(&vm->mm, 0, mm_offset >> 12, mm_length >> 12,
                           block >> 12);
        if (ret) {
                vfree(vm->pgt);
                return ret;
        }

        return 0;
}

int
nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
               u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
{
        static struct lock_class_key _key;
        struct nvkm_vm *vm;
        int ret;

        vm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm)
                return -ENOMEM;

        __mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);

        ret = nvkm_vm_legacy(mmu, offset, length, mm_offset, block, vm);
        if (ret) {
                kfree(vm);
                return ret;
        }

        *pvm = vm;
        return 0;
}

int
nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
            struct lock_class_key *key, struct nvkm_vm **pvm)
{
        struct nvkm_mmu *mmu = device->mmu;

        if (mmu->func->vmm.ctor) {
                int ret = mmu->func->vmm.ctor(mmu, mm_offset,
                                              offset + length - mm_offset,
                                              NULL, 0, key, "legacy", pvm);
                if (ret) {
                        nvkm_vm_ref(NULL, pvm, NULL);
                        return ret;
                }

                ret = nvkm_vm_legacy(mmu, offset, length, mm_offset,
                                     (*pvm)->func->page_block ?
                                     (*pvm)->func->page_block : 4096, *pvm);
                if (ret)
                        nvkm_vm_ref(NULL, pvm, NULL);

                return ret;
        }

        if (!mmu->func->create)
                return -EINVAL;

        return mmu->func->create(mmu, offset, length, mm_offset, key, pvm);
}

static int
nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
{
        struct nvkm_mmu *mmu = vm->mmu;
        struct nvkm_vm_pgd *vpgd;
        int i;

        if (!pgd)
                return 0;

        vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
        if (!vpgd)
                return -ENOMEM;

        vpgd->obj = pgd;

        mutex_lock(&vm->mutex);
        for (i = vm->fpde; i <= vm->lpde; i++)
                mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
        list_add(&vpgd->head, &vm->pgd_list);
        mutex_unlock(&vm->mutex);
        return 0;
}

static void
nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
{
        struct nvkm_vm_pgd *vpgd, *tmp;

        if (!mpgd)
                return;

        mutex_lock(&vm->mutex);
        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                if (vpgd->obj == mpgd) {
                        list_del(&vpgd->head);
                        kfree(vpgd);
                        break;
                }
        }
        mutex_unlock(&vm->mutex);
}

static void
nvkm_vm_del(struct kref *kref)
{
        struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
        struct nvkm_vm_pgd *vpgd, *tmp;

        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                nvkm_vm_unlink(vm, vpgd->obj);
        }

        nvkm_mm_fini(&vm->mm);
        vfree(vm->pgt);
        kfree(vm);
}

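/* Adjust VM references: take a reference on "ref" (linking the given page
 * directory), drop the reference held in *ptr, and destroy the VM when the
 * last reference goes away.
 */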
int
nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
{
        if (ref) {
                int ret = nvkm_vm_link(ref, pgd);
                if (ret)
                        return ret;

                kref_get(&ref->refcount);
        }

        if (*ptr) {
                if ((*ptr)->bootstrapped && pgd)
                        nvkm_memory_unref(&(*ptr)->pgt[0].mem[0]);
                nvkm_vm_unlink(*ptr, pgd);
                kref_put(&(*ptr)->refcount, nvkm_vm_del);
        }

        *ptr = ref;
        return 0;
}

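/* Subdev one-time init: create the global VM when the backend asks for one,
 * then hand off to the backend's own oneinit hook.
 */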
static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
        struct nvkm_mmu *mmu = nvkm_mmu(subdev);

        if (mmu->func->vmm.global) {
                int ret = nvkm_vm_new(subdev->device, 0, mmu->limit, 0,
                                      NULL, &mmu->vmm);
                if (ret)
                        return ret;
        }

        if (mmu->func->oneinit)
                return mmu->func->oneinit(mmu);

        return 0;
}

static int
nvkm_mmu_init(struct nvkm_subdev *subdev)
{
        struct nvkm_mmu *mmu = nvkm_mmu(subdev);
        if (mmu->func->init)
                mmu->func->init(mmu);
        return 0;
}

static void *
nvkm_mmu_dtor(struct nvkm_subdev *subdev)
{
        struct nvkm_mmu *mmu = nvkm_mmu(subdev);

        nvkm_vm_ref(NULL, &mmu->vmm, NULL);

        nvkm_mmu_ptc_fini(mmu);
        return mmu;
}

static const struct nvkm_subdev_func
nvkm_mmu = {
        .dtor = nvkm_mmu_dtor,
        .oneinit = nvkm_mmu_oneinit,
        .init = nvkm_mmu_init,
};

void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
              int index, struct nvkm_mmu *mmu)
{
        nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
        mmu->func = func;
        mmu->limit = func->limit;
        mmu->dma_bits = func->dma_bits;
        mmu->lpg_shift = func->lpg_shift;
        nvkm_mmu_ptc_init(mmu);
}

int
nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
              int index, struct nvkm_mmu **pmmu)
{
        if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_mmu_ctor(func, device, index, *pmmu);
        return 0;
}