/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

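/*
 * Write PTEs for every region backing @node, starting @delta bytes into
 * @vma's virtual range.  Allocator offsets and lengths are in 4KiB units
 * (hence the >> 12 shifts); "big" selects the large-page table whenever
 * the vma's page size differs from the VM's small-page shift.  The inner
 * loop fills at most one page table before stepping to the next PDE.
 */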
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_mm_node *r;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	delta = 0;
	list_for_each_entry(r, &node->regions, rl_entry) {
		u64 phys = (u64)r->offset << 12;
		u32 num = r->length >> bits;

		while (num) {
			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			vm->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				/* advance past the pages just written,
				 * otherwise a region spanning a page-table
				 * boundary is remapped from its start address
				 */
				phys += (u64)len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
	}

	vm->flush(vm);
}

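/* Map the whole of @node at the very start of @vma. */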
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
	nouveau_vm_map_at(vma, 0, node);
}

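/*
 * Same page-table walk as nouveau_vm_map_at(), but for system memory:
 * @list holds one DMA address per page, so it is advanced in lockstep
 * with the PTE index.
 */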
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
		  struct nouveau_mem *mem, dma_addr_t *list)
{
	struct nouveau_vm *vm = vma->vm;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;
	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->map_sg(vma, pgt, mem, pte, len, list);

		num -= len;
		pte += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}

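/*
 * Clear @length bytes worth of PTEs starting @delta bytes into @vma and
 * flush the TLBs.  Only the entries are invalidated; the page tables
 * themselves remain allocated until their refcounts drop in
 * nouveau_vm_put().
 */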
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
	struct nouveau_vm *vm = vma->vm;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;
	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->unmap(pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}

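/* Unmap the entire vma; node->length is in 4KiB units. */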
void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

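/*
 * Drop one reference on each page table in [fpde, lpde].  When a count
 * reaches zero the PDE is rewritten in every page directory sharing this
 * VM and the table is released, with mm->mutex dropped around the
 * release (presumably because gpuobj teardown takes its own locks).
 */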
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_vm_pgt *vpgt;
	struct nouveau_gpuobj *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->obj[big];
		vpgt->obj[big] = NULL;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
		}

		mutex_unlock(&vm->mm->mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm->mutex);
	}
}

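/*
 * Allocate the page table backing @pde and point every page directory
 * at it.  The size works out to the number of PTEs spanned by one PDE
 * at page size @type, times 8 bytes per entry.  Because mm->mutex is
 * dropped for the allocation, another thread may fill the PDE first;
 * the spare table is then released.
 */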
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_gpuobj *pgt;
	int big = (type != vm->spg_shift);
	u32 pgt_size;
	int ret;

	pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	mutex_unlock(&vm->mm->mutex);
	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
	mutex_lock(&vm->mm->mutex);
	if (unlikely(ret))
		return ret;

	/* someone beat us to filling the PDE while we didn't have the lock */
	if (unlikely(vpgt->refcount[big]++)) {
		mutex_unlock(&vm->mm->mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm->mutex);
		return 0;
	}

	vpgt->obj[big] = pgt;
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
	}

	return 0;
}

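/*
 * Allocate a @size-byte range of address space at @page_shift
 * granularity, taking a reference on (and allocating, if need be) each
 * page table the range touches.  On failure the references taken so far
 * are dropped again before returning.
 */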
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
	       u32 access, struct nouveau_vma *vma)
{
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mm->mutex);
	ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mm->mutex);
		return ret;
	}

	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
	for (pde = fpde; pde <= lpde; pde++) {
		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != vm->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			if (pde != fpde)
				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nouveau_mm_put(vm->mm, vma->node);
			mutex_unlock(&vm->mm->mutex);
			vma->node = NULL;
			return ret;
		}
	}
	mutex_unlock(&vm->mm->mutex);

	vma->vm = vm;
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}

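/* Give back @vma's address space and its page-table references. */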
void
nouveau_vm_put(struct nouveau_vma *vma)
{
	struct nouveau_vm *vm = vma->vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;

	mutex_lock(&vm->mm->mutex);
	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
	nouveau_mm_put(vm->mm, vma->node);
	vma->node = NULL;
	mutex_unlock(&vm->mm->mutex);
}

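/*
 * Create a VM spanning [offset, offset + length), with the allocatable
 * window starting at @mm_offset.  NV50 uses 64KiB large pages and a
 * 29-bit span per page directory entry; NVC0 uses 128KiB large pages
 * and a 27-bit span.  "block" becomes the allocator granularity, passed
 * down in 4KiB units.
 */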
int
nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
	       struct nouveau_vm **pvm)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	u32 block, pgt_bits;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	if (dev_priv->card_type == NV_50) {
		vm->map_pgt = nv50_vm_map_pgt;
		vm->map = nv50_vm_map;
		vm->map_sg = nv50_vm_map_sg;
		vm->unmap = nv50_vm_unmap;
		vm->flush = nv50_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 16;

		pgt_bits = 29;
		block = (1 << pgt_bits);
		if (length < block)
			block = length;

	} else
	if (dev_priv->card_type == NV_C0) {
		vm->map_pgt = nvc0_vm_map_pgt;
		vm->map = nvc0_vm_map;
		vm->map_sg = nvc0_vm_map_sg;
		vm->unmap = nvc0_vm_unmap;
		vm->flush = nvc0_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 17;
		pgt_bits = 27;
		block = 4096;
	} else {
		kfree(vm);
		return -ENOSYS;
	}

	vm->fpde = offset >> pgt_bits;
	vm->lpde = (offset + length - 1) >> pgt_bits;
	vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&vm->pgd_list);
	vm->dev = dev;
	vm->refcount = 1;
	vm->pgt_bits = pgt_bits - 12;

	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			      block >> 12);
	if (ret) {
		kfree(vm);
		return ret;
	}

	*pvm = vm;
	return 0;
}

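/*
 * Attach a page directory: mirror every page table allocated so far
 * into @pgd, then put it on pgd_list so later allocations update it
 * as well.
 */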
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	nouveau_gpuobj_ref(pgd, &vpgd->obj);

	mutex_lock(&vm->mm->mutex);
	for (i = vm->fpde; i <= vm->lpde; i++)
		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&vm->mm->mutex);
	return 0;
}

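/* Detach @pgd and drop the reference taken when it was linked. */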
static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	if (!pgd)
		return;

	mutex_lock(&vm->mm->mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj != pgd)
			continue;

		list_del(&vpgd->head);
		nouveau_gpuobj_ref(NULL, &vpgd->obj);
		kfree(vpgd);
	}
	mutex_unlock(&vm->mm->mutex);
}

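/*
 * Final teardown once the last reference is gone: unlink any remaining
 * page directories, then destroy the allocator and page-table array.
 */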
static void
nouveau_vm_del(struct nouveau_vm *vm)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nouveau_vm_unlink(vm, vpgd->obj);
	}
	WARN_ON(nouveau_mm_fini(&vm->mm) != 0);

	kfree(vm->pgt);
	kfree(vm);
}

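/*
 * Reference-counted assignment, roughly *ptr = ref.  Taking the new
 * reference links @pgd into @ref's page directory list; dropping the
 * old one unlinks @pgd and destroys the VM at refcount zero.
 */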
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
	       struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm *vm;
	int ret;

	vm = ref;
	if (vm) {
		ret = nouveau_vm_link(vm, pgd);
		if (ret)
			return ret;

		vm->refcount++;
	}

	vm = *ptr;
	*ptr = ref;

	if (vm) {
		nouveau_vm_unlink(vm, pgd);

		if (--vm->refcount == 0)
			nouveau_vm_del(vm);
	}

	return 0;
}