/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"
#include "vmm.h"

#include <subdev/fb.h>

#include <nvif/if500d.h>
#include <nvif/if900d.h>
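
/* Page-table backing objects smaller than the 4KiB instance-memory
 * granularity (e.g. GP100 LPT) are carved out of a shared 0x1000-byte
 * parent allocation.  A struct nvkm_mmu_ptp tracks one such parent and
 * a bitmask of which fixed-size slots within it are still free.
 */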
struct nvkm_mmu_ptp {
	struct nvkm_mmu_pt *pt;
	struct list_head head;
	u8  shift;
	u16 mask;
	u16 free;
};
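
/* Release a sub-allocated page table back to its parent.  The parent is
 * returned to the PTP free list once it regains a free slot, and destroyed
 * when every slot it contains has been released.
 */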
static void
nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
{
	const int slot = pt->base >> pt->ptp->shift;
	struct nvkm_mmu_ptp *ptp = pt->ptp;

	/* If there were no free slots in the parent allocation before,
	 * there will be now, so return PTP to the cache.
	 */
	if (!ptp->free)
		list_add(&ptp->head, &mmu->ptp.list);
	ptp->free |= BIT(slot);

	/* If there's no more sub-allocations, destroy PTP. */
	if (ptp->free == ptp->mask) {
		nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
		list_del(&ptp->head);
		kfree(ptp);
	}

	kfree(pt);
}
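
/* Sub-allocate a small page table.  Reuses a cached parent with free slots
 * when one exists, otherwise allocates a fresh 0x1000-byte parent via
 * nvkm_mmu_ptc_get() and carves the first slot out of it.
 */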
static struct nvkm_mmu_pt *
nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
{
	struct nvkm_mmu_pt *pt;
	struct nvkm_mmu_ptp *ptp;
	int slot;

	if (!(pt = kzalloc(sizeof(*pt), GFP_KERNEL)))
		return NULL;

	ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
	if (!ptp) {
		/* Need to allocate a new parent to sub-allocate from. */
		if (!(ptp = kmalloc(sizeof(*ptp), GFP_KERNEL))) {
			kfree(pt);
			return NULL;
		}

		ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
		if (!ptp->pt) {
			kfree(ptp);
			kfree(pt);
			return NULL;
		}

		ptp->shift = order_base_2(size);
		slot = nvkm_memory_size(ptp->pt->memory) >> ptp->shift;
		ptp->mask = (1 << slot) - 1;
		ptp->free = ptp->mask;
		list_add(&ptp->head, &mmu->ptp.list);
	}
	pt->ptp = ptp;
	pt->sub = true;

	/* Sub-allocate from parent object, removing PTP from cache
	 * if there's no more free slots left.
	 */
	slot = __ffs(ptp->free);
	ptp->free &= ~BIT(slot);
	if (!ptp->free)
		list_del(&ptp->head);

	pt->memory = pt->ptp->pt->memory;
	pt->base = slot << ptp->shift;
	pt->addr = pt->ptp->pt->addr + pt->base;
	return pt;
}
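
/* One nvkm_mmu_ptc caches unused page-table allocations of a single size,
 * so frequently recycled tables avoid a round trip through instance-memory
 * allocation.
 */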
struct nvkm_mmu_ptc {
	struct list_head head;
	struct list_head item;
	u32 size;
	u32 refs;
};
static inline struct nvkm_mmu_ptc *
nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
{
	struct nvkm_mmu_ptc *ptc;

	list_for_each_entry(ptc, &mmu->ptc.list, head) {
		if (ptc->size == size)
			return ptc;
	}

	ptc = kmalloc(sizeof(*ptc), GFP_KERNEL);
	if (ptc) {
		INIT_LIST_HEAD(&ptc->item);
		ptc->size = size;
		ptc->refs = 0;
		list_add(&ptc->head, &mmu->ptc.list);
	}

	return ptc;
}
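
/* Return a page table to the MMU.  Sub-allocations are handed back to the
 * PTP layer; full allocations are either parked on their size-matched cache
 * (bounded by a small heuristic limit) or freed outright when forced.
 */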
void
nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt)
{
	struct nvkm_mmu_pt *pt = *ppt;
	if (pt) {
		/* Handle sub-allocated page tables. */
		if (pt->sub) {
			mutex_lock(&mmu->ptp.mutex);
			nvkm_mmu_ptp_put(mmu, force, pt);
			mutex_unlock(&mmu->ptp.mutex);
			return;
		}

		/* Either cache or free the object. */
		mutex_lock(&mmu->ptc.mutex);
		if (pt->ptc->refs < 8 /* Heuristic. */ && !force) {
			list_add_tail(&pt->head, &pt->ptc->item);
			pt->ptc->refs++;
		} else {
			nvkm_memory_unref(&pt->memory);
			kfree(pt);
		}
		mutex_unlock(&mmu->ptc.mutex);
	}
}
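
/* Allocate (or recycle) a page table of the given size/alignment, optionally
 * zeroed.  Alignments below 0x1000 are routed to the PTP sub-allocator.
 *
 * Illustrative pairing only (hypothetical sizes):
 *
 *	struct nvkm_mmu_pt *pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, true);
 *	...
 *	nvkm_mmu_ptc_put(mmu, false, &pt);
 */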
struct nvkm_mmu_pt *
nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
{
	struct nvkm_mmu_ptc *ptc;
	struct nvkm_mmu_pt *pt;
	int ret;

	/* Sub-allocated page table (ie. GP100 LPT). */
	if (align < 0x1000) {
		mutex_lock(&mmu->ptp.mutex);
		pt = nvkm_mmu_ptp_get(mmu, align, zero);
		mutex_unlock(&mmu->ptp.mutex);
		return pt;
	}

	/* Lookup cache for this page table size. */
	mutex_lock(&mmu->ptc.mutex);
	ptc = nvkm_mmu_ptc_find(mmu, size);
	if (!ptc) {
		mutex_unlock(&mmu->ptc.mutex);
		return NULL;
	}

	/* If there's a free PT in the cache, reuse it. */
	pt = list_first_entry_or_null(&ptc->item, typeof(*pt), head);
	if (pt) {
		if (zero)
			nvkm_fo64(pt->memory, 0, 0, size >> 3);
		list_del(&pt->head);
		ptc->refs--;
		mutex_unlock(&mmu->ptc.mutex);
		return pt;
	}
	mutex_unlock(&mmu->ptc.mutex);

	/* No such luck, we need to allocate. */
	if (!(pt = kmalloc(sizeof(*pt), GFP_KERNEL)))
		return NULL;
	pt->ptc = ptc;
	pt->sub = false;

	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
			      size, align, zero, &pt->memory);
	if (ret) {
		kfree(pt);
		return NULL;
	}

	pt->base = 0;
	pt->addr = nvkm_memory_addr(pt->memory);
	return pt;
}
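
/* Common legacy-map helper: translate the nvkm_mem/vma pair into the
 * per-generation map arguments (nv50 vs. gf100 layouts), validate them with
 * the backend if it provides ->valid(), then write the PTEs under the VMM
 * mutex and transfer memory/tag references onto the VMA node.
 */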
static void
nvkm_vm_map_(const struct nvkm_vmm_page *page, struct nvkm_vma *vma, u64 delta,
	     struct nvkm_mem *mem, nvkm_vmm_pte_func fn,
	     struct nvkm_vmm_map *map)
{
	union {
		struct nv50_vmm_map_v0 nv50;
		struct gf100_vmm_map_v0 gf100;
	} args;
	struct nvkm_vmm *vmm = vma->vm;
	void *argv = NULL;
	u32 argc = 0;
	int ret;

	map->memory = mem->memory;

	if (vmm->func->valid) {
		switch (vmm->mmu->subdev.device->card_type) {
		case NV_50:
			args.nv50.version = 0;
			args.nv50.ro = !(vma->access & NV_MEM_ACCESS_WO);
			args.nv50.priv = !!(vma->access & NV_MEM_ACCESS_SYS);
			args.nv50.kind = (mem->memtype & 0x07f);
			args.nv50.comp = (mem->memtype & 0x180) >> 7;
			argv = &args.nv50;
			argc = sizeof(args.nv50);
			break;
		case NV_C0:
		case NV_E0:
		case GM100:
		case GP100:
			args.gf100.version = 0;
			args.gf100.vol = (nvkm_memory_target(map->memory) != NVKM_MEM_TARGET_VRAM);
			args.gf100.ro = !(vma->access & NV_MEM_ACCESS_WO);
			args.gf100.priv = !!(vma->access & NV_MEM_ACCESS_SYS);
			args.gf100.kind = (mem->memtype & 0x0ff);
			argv = &args.gf100;
			argc = sizeof(args.gf100);
			break;
		default:
			break;
		}

		ret = vmm->func->valid(vmm, argv, argc, map);
		if (WARN_ON(ret))
			return;
	}

	mutex_lock(&vmm->mutex);
	nvkm_vmm_ptes_map(vmm, page, vma->node->addr + delta,
			  vma->node->size, map, fn);
	mutex_unlock(&vmm->mutex);

	nvkm_memory_tags_put(vma->node->memory, vmm->mmu->subdev.device, &vma->node->tags);
	nvkm_memory_unref(&vma->node->memory);
	vma->node->memory = nvkm_memory_ref(map->memory);
	vma->node->tags = map->tags;
}
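
/* Drop every cached page-table allocation back to instance memory. */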
void
nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
{
	struct nvkm_mmu_ptc *ptc;
	list_for_each_entry(ptc, &mmu->ptc.list, head) {
		struct nvkm_mmu_pt *pt, *tt;
		list_for_each_entry_safe(pt, tt, &ptc->item, head) {
			nvkm_memory_unref(&pt->memory);
			list_del(&pt->head);
			kfree(pt);
		}
	}
}
static void
nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu)
{
	struct nvkm_mmu_ptc *ptc, *ptct;

	list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) {
		WARN_ON(!list_empty(&ptc->item));
		list_del(&ptc->head);
		kfree(ptc);
	}
}
static void
nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
{
	mutex_init(&mmu->ptc.mutex);
	INIT_LIST_HEAD(&mmu->ptc.list);
	mutex_init(&mmu->ptp.mutex);
	INIT_LIST_HEAD(&mmu->ptp.list);
}
void
nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
{
	const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
	if (page->desc->func->unmap) {
		struct nvkm_vmm_map map = { .mem = node->mem };
		nvkm_vm_map_(page, vma, delta, node, page->desc->func->mem, &map);
	}
}
static void
nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
		     struct nvkm_mem *mem)
{
	const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
	if (page->desc->func->unmap) {
		struct nvkm_vmm_map map = { .sgl = mem->sg->sgl };
		nvkm_vm_map_(page, vma, delta, mem, page->desc->func->sgl, &map);
	}
}
static void
nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
	       struct nvkm_mem *mem)
{
	const struct nvkm_vmm_page *page = &vma->vm->func->page[vma->node->page];
	if (page->desc->func->unmap) {
		struct nvkm_vmm_map map = { .dma = mem->pages };
		nvkm_vm_map_(page, vma, delta, mem, page->desc->func->dma, &map);
	}
}
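
/* Map a legacy nvkm_mem, choosing the PTE writer from how the memory is
 * described: an sg_table, a DMA page array, or contiguous VRAM.
 */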
void
nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
{
	if (node->sg)
		nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
	else
	if (node->pages)
		nvkm_vm_map_sg(vma, 0, node->size << 12, node);
	else
		nvkm_vm_map_at(vma, 0, node);
}
void
nvkm_vm_unmap(struct nvkm_vma *vma)
{
	nvkm_vmm_unmap(vma->vm, vma->node);
}
int
nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
	    struct nvkm_vma *vma)
{
	int ret;

	mutex_lock(&vm->mutex);
	ret = nvkm_vmm_get_locked(vm, true, false, false, page_shift, 0,
				  size, &vma->node);
	mutex_unlock(&vm->mutex);
	if (ret)
		return ret;

	nvkm_vm_ref(vm, &vma->vm, NULL);
	vma->offset = vma->addr = vma->node->addr;
	vma->access = access;
	return 0;
}
void
nvkm_vm_put(struct nvkm_vma *vma)
{
	nvkm_vmm_put(vma->vm, &vma->node);
	nvkm_vm_ref(NULL, &vma->vm, NULL);
}
int
nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
{
	return nvkm_vmm_boot(vm);
}
int
nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
	    struct lock_class_key *key, struct nvkm_vm **pvm)
{
	struct nvkm_mmu *mmu = device->mmu;

	*pvm = NULL;
	if (mmu->func->vmm.ctor) {
		int ret = mmu->func->vmm.ctor(mmu, mm_offset,
					      offset + length - mm_offset,
					      NULL, 0, key, "legacy", pvm);
		if (ret) {
			nvkm_vm_ref(NULL, pvm, NULL);
			return ret;
		}

		return 0;
	}

	return -EINVAL;
}
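
/* Legacy VM reference helper: take a reference on 'ref' (joining 'inst' to
 * it when given), drop the VM previously pointed to by '*ptr' (parting
 * 'inst' first), and leave '*ptr' pointing at 'ref'.
 */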
int
nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_memory *inst)
{
	if (ref) {
		if (inst) {
			int ret = nvkm_vmm_join(ref, inst);
			if (ret)
				return ret;
		}

		nvkm_vmm_ref(ref);
	}

	if (*ptr) {
		if (inst)
			nvkm_vmm_part(*ptr, inst);
		nvkm_vmm_unref(ptr);
	}

	*ptr = ref;
	return 0;
}
static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);

	if (mmu->func->vmm.global) {
		int ret = nvkm_vmm_new(subdev->device, 0, 0, NULL, 0, NULL,
				       "gart", &mmu->vmm);
		if (ret)
			return ret;
	}

	return 0;
}
static int
nvkm_mmu_init(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
	if (mmu->func->init)
		mmu->func->init(mmu);
	return 0;
}
static void *
nvkm_mmu_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);

	nvkm_vmm_unref(&mmu->vmm);

	nvkm_mmu_ptc_fini(mmu);
	return mmu;
}
static const struct nvkm_subdev_func
nvkm_mmu = {
	.dtor = nvkm_mmu_dtor,
	.oneinit = nvkm_mmu_oneinit,
	.init = nvkm_mmu_init,
};
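
/* Common constructor shared by all MMU implementations: registers the
 * subdev and seeds the per-instance page-table caches.
 */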
void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu *mmu)
{
	nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
	mmu->func = func;
	mmu->limit = func->limit;
	mmu->dma_bits = func->dma_bits;
	mmu->lpg_shift = func->lpg_shift;
	nvkm_mmu_ptc_init(mmu);
}
int
nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu **pmmu)
{
	if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_mmu_ctor(func, device, index, *pmmu);
	return 0;
}