/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>

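/* Map the list of linear memory nodes behind @node into the address range
 * held by @vma, starting @delta bytes into the range, then flush the VM.
 */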
void
nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_mm_node *r = node->mem;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	delta = 0;
	while (r) {
		u64 phys = (u64)r->offset << 12;
		u32 num = r->length >> bits;

		while (num) {
			struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			mmu->func->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				phys += len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
		r = r->next;
	}

	mmu->func->flush(vm);
}

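/* Map a scatter-gather table (mem->sg) into @vma one page at a time,
 * splitting runs at page-table boundaries, then flush the VM.
 */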
static void
nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
		     struct nvkm_mem *mem)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;
	u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (mmu->func->pgt_bits - bits);
	unsigned m, sglen;
	u32 end, len;
	int i;
	struct scatterlist *sg;

	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
		sglen = sg_dma_len(sg) >> PAGE_SHIFT;

		end = pte + sglen;
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

			mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
			num--;
			pte++;

			if (num == 0)
				goto finish;
		}
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
		if (m < sglen) {
			for (; m < sglen; m++) {
				dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

				mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
				num--;
				pte++;
				if (num == 0)
					goto finish;
			}
		}
	}
finish:
	mmu->func->flush(vm);
}

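/* Map an array of DMA page addresses (mem->pages) into @vma, then flush
 * the VM.
 */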
static void
nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
	       struct nvkm_mem *mem)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	dma_addr_t *list = mem->pages;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;
	u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		mmu->func->map_sg(vma, pgt, mem, pte, len, list);

		num -= len;
		pte += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	mmu->func->flush(vm);
}

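/* Map @node at the start of @vma, picking the backend that matches how the
 * memory is described: sg table, page array, or linear memory nodes.
 */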
void
nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
{
	if (node->sg)
		nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
	else if (node->pages)
		nvkm_vm_map_sg(vma, 0, node->size << 12, node);
	else
		nvkm_vm_map_at(vma, 0, node);
}

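/* Clear the PTEs covering @length bytes of @vma, starting @delta bytes into
 * the range, then flush the VM.
 */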
void
nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;
	u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		mmu->func->unmap(vma, pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	mmu->func->flush(vm);
}

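/* Unmap the entire allocation behind @vma. */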
void
nvkm_vm_unmap(struct nvkm_vma *vma)
{
	nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

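/* Drop a reference on each page table between @fpde and @lpde; free any
 * table whose refcount reaches zero and update every linked page directory.
 */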
static void
nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgd *vpgd;
	struct nvkm_vm_pgt *vpgt;
	struct nvkm_memory *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->mem[big];
		vpgt->mem[big] = NULL;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
		}

		mmu->func->flush(vm);

		nvkm_memory_del(&pgt);
	}
}

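/* Allocate the page table backing @pde for the given page size (@type) and
 * point every linked page directory at it.
 */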
static int
nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nvkm_vm_pgd *vpgd;
	int big = (type != mmu->func->spg_shift);
	u32 pgt_size;
	int ret;

	pgt_size = (1 << (mmu->func->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
			      pgt_size, 0x1000, true, &vpgt->mem[big]);
	if (unlikely(ret))
		return ret;

	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
	}

	vpgt->refcount[big]++;
	return 0;
}

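/* Allocate @size bytes of virtual address space from @vm with the requested
 * page size and access flags, creating any page tables the range needs.
 */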
int
nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
	    struct nvkm_vma *vma)
{
	struct nvkm_mmu *mmu = vm->mmu;
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mutex);
	ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
			   &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mutex);
		return ret;
	}

	fpde = (vma->node->offset >> mmu->func->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;

	for (pde = fpde; pde <= lpde; pde++) {
		struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != mmu->func->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			if (pde != fpde)
				nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nvkm_mm_free(&vm->mm, &vma->node);
			mutex_unlock(&vm->mutex);
			return ret;
		}
	}
	mutex_unlock(&vm->mutex);

	vma->vm = NULL;
	nvkm_vm_ref(vm, &vma->vm, NULL);
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}

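/* Release the address space held by @vma, dropping its page-table references
 * and its reference on the VM.
 */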
void
nvkm_vm_put(struct nvkm_vma *vma)
{
	struct nvkm_mmu *mmu;
	struct nvkm_vm *vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	vm = vma->vm;
	mmu = vm->mmu;

	fpde = (vma->node->offset >> mmu->func->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;

	mutex_lock(&vm->mutex);
	nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
	nvkm_mm_free(&vm->mm, &vma->node);
	mutex_unlock(&vm->mutex);

	nvkm_vm_ref(NULL, &vma->vm, NULL);
}

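/* Allocate a small-page page table covering @size bytes, install it as the
 * VM's first page table, and boot the backing memory into the new VM.
 */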
int
nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_memory *pgt;
	int ret;

	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
			      (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
	if (ret == 0) {
		vm->pgt[0].refcount[0] = 1;
		vm->pgt[0].mem[0] = pgt;
		nvkm_memory_boot(pgt, vm);
	}

	return ret;
}

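/* Allocate and initialise a VM spanning [@offset, @offset + @length), with
 * its address allocator starting at @mm_offset and @block bytes of
 * allocation granularity.
 */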
int
nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
	       u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
{
	static struct lock_class_key _key;
	struct nvkm_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	__mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
	INIT_LIST_HEAD(&vm->pgd_list);
	vm->mmu = mmu;
	kref_init(&vm->refcount);
	vm->fpde = offset >> (mmu->func->pgt_bits + 12);
	vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);

	vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			   block >> 12);
	if (ret) {
		vfree(vm->pgt);
		kfree(vm);
		return ret;
	}

	*pvm = vm;

	return 0;
}

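/* Create a new VM through the chipset-specific backend, if it provides one. */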
int
nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
	    struct lock_class_key *key, struct nvkm_vm **pvm)
{
	struct nvkm_mmu *mmu = device->mmu;
	if (!mmu->func->create)
		return -EINVAL;
	return mmu->func->create(mmu, offset, length, mm_offset, key, pvm);
}

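/* Attach a page directory object to the VM and fill it with the VM's
 * existing page tables.
 */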
static int
nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	vpgd->obj = pgd;

	mutex_lock(&vm->mutex);
	for (i = vm->fpde; i <= vm->lpde; i++)
		mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&vm->mutex);
	return 0;
}

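/* Detach a page directory object from the VM, if it is currently linked. */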
static void
nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
{
	struct nvkm_vm_pgd *vpgd, *tmp;

	if (!mpgd)
		return;

	mutex_lock(&vm->mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj == mpgd) {
			list_del(&vpgd->head);
			kfree(vpgd);
			break;
		}
	}
	mutex_unlock(&vm->mutex);
}

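/* Final teardown, called once the last reference to the VM is dropped. */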
static void
nvkm_vm_del(struct kref *kref)
{
	struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
	struct nvkm_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nvkm_vm_unlink(vm, vpgd->obj);
	}

	nvkm_mm_fini(&vm->mm);
	vfree(vm->pgt);
	kfree(vm);
}

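/* Move a VM reference: take a reference on @ref (linking @pgd to it) and
 * drop the reference currently held in *@ptr (unlinking @pgd from it).
 */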
int
nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
{
	if (ref) {
		int ret = nvkm_vm_link(ref, pgd);
		if (ret)
			return ret;

		kref_get(&ref->refcount);
	}

	if (*ptr) {
		nvkm_vm_unlink(*ptr, pgd);
		kref_put(&(*ptr)->refcount, nvkm_vm_del);
	}

	*ptr = ref;
	return 0;
}

static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
	if (mmu->func->oneinit)
		return mmu->func->oneinit(mmu);
	return 0;
}

static int
nvkm_mmu_init(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
	if (mmu->func->init)
		mmu->func->init(mmu);
	return 0;
}

static void *
nvkm_mmu_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
	if (mmu->func->dtor)
		return mmu->func->dtor(mmu);
	return mmu;
}

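/* Subdev method table: forwards the generic subdev hooks to the MMU backend. */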
static const struct nvkm_subdev_func
nvkm_mmu = {
	.dtor = nvkm_mmu_dtor,
	.oneinit = nvkm_mmu_oneinit,
	.init = nvkm_mmu_init,
};

void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu *mmu)
{
	nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
	mmu->func = func;
	mmu->limit = func->limit;
	mmu->dma_bits = func->dma_bits;
	mmu->lpg_shift = func->lpg_shift;
}

int
nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu **pmmu)
{
	if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_mmu_ctor(func, device, index, *pmmu);
	return 0;
}