]>
git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <core/gpuobj.h>
#include <subdev/fb.h>
30 nvkm_vm_map_at(struct nvkm_vma
*vma
, u64 delta
, struct nvkm_mem
*node
)
32 struct nvkm_vm
*vm
= vma
->vm
;
33 struct nvkm_mmu
*mmu
= vm
->mmu
;
34 struct nvkm_mm_node
*r
= node
->mem
;
35 int big
= vma
->node
->type
!= mmu
->func
->spg_shift
;
36 u32 offset
= vma
->node
->offset
+ (delta
>> 12);
37 u32 bits
= vma
->node
->type
- 12;
38 u32 pde
= (offset
>> mmu
->func
->pgt_bits
) - vm
->fpde
;
39 u32 pte
= (offset
& ((1 << mmu
->func
->pgt_bits
) - 1)) >> bits
;
40 u32 max
= 1 << (mmu
->func
->pgt_bits
- bits
);
45 u64 phys
= (u64
)r
->offset
<< 12;
46 u32 num
= r
->length
>> bits
;
49 struct nvkm_memory
*pgt
= vm
->pgt
[pde
].mem
[big
];
52 if (unlikely(end
>= max
))
56 mmu
->func
->map(vma
, pgt
, node
, pte
, len
, phys
, delta
);
60 if (unlikely(end
>= max
)) {
61 phys
+= len
<< (bits
+ 12);
66 delta
+= (u64
)len
<< vma
->node
->type
;
75 nvkm_vm_map_sg_table(struct nvkm_vma
*vma
, u64 delta
, u64 length
,
78 struct nvkm_vm
*vm
= vma
->vm
;
79 struct nvkm_mmu
*mmu
= vm
->mmu
;
80 int big
= vma
->node
->type
!= mmu
->func
->spg_shift
;
81 u32 offset
= vma
->node
->offset
+ (delta
>> 12);
82 u32 bits
= vma
->node
->type
- 12;
83 u32 num
= length
>> vma
->node
->type
;
84 u32 pde
= (offset
>> mmu
->func
->pgt_bits
) - vm
->fpde
;
85 u32 pte
= (offset
& ((1 << mmu
->func
->pgt_bits
) - 1)) >> bits
;
86 u32 max
= 1 << (mmu
->func
->pgt_bits
- bits
);
90 struct scatterlist
*sg
;
92 for_each_sg(mem
->sg
->sgl
, sg
, mem
->sg
->nents
, i
) {
93 struct nvkm_memory
*pgt
= vm
->pgt
[pde
].mem
[big
];
94 sglen
= sg_dma_len(sg
) >> PAGE_SHIFT
;
97 if (unlikely(end
>= max
))
101 for (m
= 0; m
< len
; m
++) {
102 dma_addr_t addr
= sg_dma_address(sg
) + (m
<< PAGE_SHIFT
);
104 mmu
->func
->map_sg(vma
, pgt
, mem
, pte
, 1, &addr
);
111 if (unlikely(end
>= max
)) {
116 for (; m
< sglen
; m
++) {
117 dma_addr_t addr
= sg_dma_address(sg
) + (m
<< PAGE_SHIFT
);
119 mmu
->func
->map_sg(vma
, pgt
, mem
, pte
, 1, &addr
);
129 mmu
->func
->flush(vm
);
133 nvkm_vm_map_sg(struct nvkm_vma
*vma
, u64 delta
, u64 length
,
134 struct nvkm_mem
*mem
)
136 struct nvkm_vm
*vm
= vma
->vm
;
137 struct nvkm_mmu
*mmu
= vm
->mmu
;
138 dma_addr_t
*list
= mem
->pages
;
139 int big
= vma
->node
->type
!= mmu
->func
->spg_shift
;
140 u32 offset
= vma
->node
->offset
+ (delta
>> 12);
141 u32 bits
= vma
->node
->type
- 12;
142 u32 num
= length
>> vma
->node
->type
;
143 u32 pde
= (offset
>> mmu
->func
->pgt_bits
) - vm
->fpde
;
144 u32 pte
= (offset
& ((1 << mmu
->func
->pgt_bits
) - 1)) >> bits
;
145 u32 max
= 1 << (mmu
->func
->pgt_bits
- bits
);
149 struct nvkm_memory
*pgt
= vm
->pgt
[pde
].mem
[big
];
152 if (unlikely(end
>= max
))
156 mmu
->func
->map_sg(vma
, pgt
, mem
, pte
, len
, list
);
161 if (unlikely(end
>= max
)) {
167 mmu
->func
->flush(vm
);
171 nvkm_vm_map(struct nvkm_vma
*vma
, struct nvkm_mem
*node
)
174 nvkm_vm_map_sg_table(vma
, 0, node
->size
<< 12, node
);
177 nvkm_vm_map_sg(vma
, 0, node
->size
<< 12, node
);
179 nvkm_vm_map_at(vma
, 0, node
);
183 nvkm_vm_unmap_at(struct nvkm_vma
*vma
, u64 delta
, u64 length
)
185 struct nvkm_vm
*vm
= vma
->vm
;
186 struct nvkm_mmu
*mmu
= vm
->mmu
;
187 int big
= vma
->node
->type
!= mmu
->func
->spg_shift
;
188 u32 offset
= vma
->node
->offset
+ (delta
>> 12);
189 u32 bits
= vma
->node
->type
- 12;
190 u32 num
= length
>> vma
->node
->type
;
191 u32 pde
= (offset
>> mmu
->func
->pgt_bits
) - vm
->fpde
;
192 u32 pte
= (offset
& ((1 << mmu
->func
->pgt_bits
) - 1)) >> bits
;
193 u32 max
= 1 << (mmu
->func
->pgt_bits
- bits
);
197 struct nvkm_memory
*pgt
= vm
->pgt
[pde
].mem
[big
];
200 if (unlikely(end
>= max
))
204 mmu
->func
->unmap(vma
, pgt
, pte
, len
);
208 if (unlikely(end
>= max
)) {
214 mmu
->func
->flush(vm
);
218 nvkm_vm_unmap(struct nvkm_vma
*vma
)
220 nvkm_vm_unmap_at(vma
, 0, (u64
)vma
->node
->length
<< 12);
224 nvkm_vm_unmap_pgt(struct nvkm_vm
*vm
, int big
, u32 fpde
, u32 lpde
)
226 struct nvkm_mmu
*mmu
= vm
->mmu
;
227 struct nvkm_vm_pgd
*vpgd
;
228 struct nvkm_vm_pgt
*vpgt
;
229 struct nvkm_memory
*pgt
;
232 for (pde
= fpde
; pde
<= lpde
; pde
++) {
233 vpgt
= &vm
->pgt
[pde
- vm
->fpde
];
234 if (--vpgt
->refcount
[big
])
237 pgt
= vpgt
->mem
[big
];
238 vpgt
->mem
[big
] = NULL
;
240 list_for_each_entry(vpgd
, &vm
->pgd_list
, head
) {
241 mmu
->func
->map_pgt(vpgd
->obj
, pde
, vpgt
->mem
);
244 nvkm_memory_del(&pgt
);
249 nvkm_vm_map_pgt(struct nvkm_vm
*vm
, u32 pde
, u32 type
)
251 struct nvkm_mmu
*mmu
= vm
->mmu
;
252 struct nvkm_vm_pgt
*vpgt
= &vm
->pgt
[pde
- vm
->fpde
];
253 struct nvkm_vm_pgd
*vpgd
;
254 int big
= (type
!= mmu
->func
->spg_shift
);
258 pgt_size
= (1 << (mmu
->func
->pgt_bits
+ 12)) >> type
;
261 ret
= nvkm_memory_new(mmu
->subdev
.device
, NVKM_MEM_TARGET_INST
,
262 pgt_size
, 0x1000, true, &vpgt
->mem
[big
]);
266 list_for_each_entry(vpgd
, &vm
->pgd_list
, head
) {
267 mmu
->func
->map_pgt(vpgd
->obj
, pde
, vpgt
->mem
);
270 vpgt
->refcount
[big
]++;
275 nvkm_vm_get(struct nvkm_vm
*vm
, u64 size
, u32 page_shift
, u32 access
,
276 struct nvkm_vma
*vma
)
278 struct nvkm_mmu
*mmu
= vm
->mmu
;
279 u32 align
= (1 << page_shift
) >> 12;
280 u32 msize
= size
>> 12;
284 mutex_lock(&vm
->mutex
);
285 ret
= nvkm_mm_head(&vm
->mm
, 0, page_shift
, msize
, msize
, align
,
287 if (unlikely(ret
!= 0)) {
288 mutex_unlock(&vm
->mutex
);
292 fpde
= (vma
->node
->offset
>> mmu
->func
->pgt_bits
);
293 lpde
= (vma
->node
->offset
+ vma
->node
->length
- 1) >> mmu
->func
->pgt_bits
;
295 for (pde
= fpde
; pde
<= lpde
; pde
++) {
296 struct nvkm_vm_pgt
*vpgt
= &vm
->pgt
[pde
- vm
->fpde
];
297 int big
= (vma
->node
->type
!= mmu
->func
->spg_shift
);
299 if (likely(vpgt
->refcount
[big
])) {
300 vpgt
->refcount
[big
]++;
304 ret
= nvkm_vm_map_pgt(vm
, pde
, vma
->node
->type
);
307 nvkm_vm_unmap_pgt(vm
, big
, fpde
, pde
- 1);
308 nvkm_mm_free(&vm
->mm
, &vma
->node
);
309 mutex_unlock(&vm
->mutex
);
313 mutex_unlock(&vm
->mutex
);
316 nvkm_vm_ref(vm
, &vma
->vm
, NULL
);
317 vma
->offset
= (u64
)vma
->node
->offset
<< 12;
318 vma
->access
= access
;
323 nvkm_vm_put(struct nvkm_vma
*vma
)
325 struct nvkm_mmu
*mmu
;
329 if (unlikely(vma
->node
== NULL
))
334 fpde
= (vma
->node
->offset
>> mmu
->func
->pgt_bits
);
335 lpde
= (vma
->node
->offset
+ vma
->node
->length
- 1) >> mmu
->func
->pgt_bits
;
337 mutex_lock(&vm
->mutex
);
338 nvkm_vm_unmap_pgt(vm
, vma
->node
->type
!= mmu
->func
->spg_shift
, fpde
, lpde
);
339 nvkm_mm_free(&vm
->mm
, &vma
->node
);
340 mutex_unlock(&vm
->mutex
);
342 nvkm_vm_ref(NULL
, &vma
->vm
, NULL
);
346 nvkm_vm_boot(struct nvkm_vm
*vm
, u64 size
)
348 struct nvkm_mmu
*mmu
= vm
->mmu
;
349 struct nvkm_memory
*pgt
;
352 ret
= nvkm_memory_new(mmu
->subdev
.device
, NVKM_MEM_TARGET_INST
,
353 (size
>> mmu
->func
->spg_shift
) * 8, 0x1000, true, &pgt
);
355 vm
->pgt
[0].refcount
[0] = 1;
356 vm
->pgt
[0].mem
[0] = pgt
;
357 nvkm_memory_boot(pgt
, vm
);
364 nvkm_vm_create(struct nvkm_mmu
*mmu
, u64 offset
, u64 length
, u64 mm_offset
,
365 u32 block
, struct lock_class_key
*key
, struct nvkm_vm
**pvm
)
367 static struct lock_class_key _key
;
369 u64 mm_length
= (offset
+ length
) - mm_offset
;
372 vm
= kzalloc(sizeof(*vm
), GFP_KERNEL
);
376 __mutex_init(&vm
->mutex
, "&vm->mutex", key
? key
: &_key
);
377 INIT_LIST_HEAD(&vm
->pgd_list
);
379 kref_init(&vm
->refcount
);
380 vm
->fpde
= offset
>> (mmu
->func
->pgt_bits
+ 12);
381 vm
->lpde
= (offset
+ length
- 1) >> (mmu
->func
->pgt_bits
+ 12);
383 vm
->pgt
= vzalloc((vm
->lpde
- vm
->fpde
+ 1) * sizeof(*vm
->pgt
));
389 ret
= nvkm_mm_init(&vm
->mm
, mm_offset
>> 12, mm_length
>> 12,
403 nvkm_vm_new(struct nvkm_device
*device
, u64 offset
, u64 length
, u64 mm_offset
,
404 struct lock_class_key
*key
, struct nvkm_vm
**pvm
)
406 struct nvkm_mmu
*mmu
= device
->mmu
;
407 if (!mmu
->func
->create
)
409 return mmu
->func
->create(mmu
, offset
, length
, mm_offset
, key
, pvm
);
413 nvkm_vm_link(struct nvkm_vm
*vm
, struct nvkm_gpuobj
*pgd
)
415 struct nvkm_mmu
*mmu
= vm
->mmu
;
416 struct nvkm_vm_pgd
*vpgd
;
422 vpgd
= kzalloc(sizeof(*vpgd
), GFP_KERNEL
);
428 mutex_lock(&vm
->mutex
);
429 for (i
= vm
->fpde
; i
<= vm
->lpde
; i
++)
430 mmu
->func
->map_pgt(pgd
, i
, vm
->pgt
[i
- vm
->fpde
].mem
);
431 list_add(&vpgd
->head
, &vm
->pgd_list
);
432 mutex_unlock(&vm
->mutex
);
437 nvkm_vm_unlink(struct nvkm_vm
*vm
, struct nvkm_gpuobj
*mpgd
)
439 struct nvkm_vm_pgd
*vpgd
, *tmp
;
444 mutex_lock(&vm
->mutex
);
445 list_for_each_entry_safe(vpgd
, tmp
, &vm
->pgd_list
, head
) {
446 if (vpgd
->obj
== mpgd
) {
447 list_del(&vpgd
->head
);
452 mutex_unlock(&vm
->mutex
);
456 nvkm_vm_del(struct kref
*kref
)
458 struct nvkm_vm
*vm
= container_of(kref
, typeof(*vm
), refcount
);
459 struct nvkm_vm_pgd
*vpgd
, *tmp
;
461 list_for_each_entry_safe(vpgd
, tmp
, &vm
->pgd_list
, head
) {
462 nvkm_vm_unlink(vm
, vpgd
->obj
);
465 nvkm_mm_fini(&vm
->mm
);
471 nvkm_vm_ref(struct nvkm_vm
*ref
, struct nvkm_vm
**ptr
, struct nvkm_gpuobj
*pgd
)
474 int ret
= nvkm_vm_link(ref
, pgd
);
478 kref_get(&ref
->refcount
);
482 nvkm_vm_unlink(*ptr
, pgd
);
483 kref_put(&(*ptr
)->refcount
, nvkm_vm_del
);
491 nvkm_mmu_oneinit(struct nvkm_subdev
*subdev
)
493 struct nvkm_mmu
*mmu
= nvkm_mmu(subdev
);
494 if (mmu
->func
->oneinit
)
495 return mmu
->func
->oneinit(mmu
);
500 nvkm_mmu_init(struct nvkm_subdev
*subdev
)
502 struct nvkm_mmu
*mmu
= nvkm_mmu(subdev
);
504 mmu
->func
->init(mmu
);
509 nvkm_mmu_dtor(struct nvkm_subdev
*subdev
)
511 struct nvkm_mmu
*mmu
= nvkm_mmu(subdev
);
513 return mmu
->func
->dtor(mmu
);
517 static const struct nvkm_subdev_func
519 .dtor
= nvkm_mmu_dtor
,
520 .oneinit
= nvkm_mmu_oneinit
,
521 .init
= nvkm_mmu_init
,
525 nvkm_mmu_ctor(const struct nvkm_mmu_func
*func
, struct nvkm_device
*device
,
526 int index
, struct nvkm_mmu
*mmu
)
528 nvkm_subdev_ctor(&nvkm_mmu
, device
, index
, &mmu
->subdev
);
530 mmu
->limit
= func
->limit
;
531 mmu
->dma_bits
= func
->dma_bits
;
532 mmu
->lpg_shift
= func
->lpg_shift
;
536 nvkm_mmu_new_(const struct nvkm_mmu_func
*func
, struct nvkm_device
*device
,
537 int index
, struct nvkm_mmu
**pmmu
)
539 if (!(*pmmu
= kzalloc(sizeof(**pmmu
), GFP_KERNEL
)))
541 nvkm_mmu_ctor(func
, device
, index
, *pmmu
);