/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"
#include "ttm/ttm_page_alloc.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

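/* TTM backing-store destructor: called once the last reference to the
 * buffer object is gone.  Releases any NV1x tile region still held and
 * frees the nouveau_bo wrapper itself.
 */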
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (unlikely(nvbo->gem))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);

        nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
        kfree(nvbo);
}

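/* Round the requested size and alignment up to what the chip needs:
 * pre-NV50 tiled surfaces take chipset-specific alignment and a size
 * that is a multiple of the tile pitch, NV50+ aligns to the page size
 * chosen for the buffer, and everything is padded to PAGE_SIZE last.
 */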
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
                       int *align, int *size)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

        if (dev_priv->card_type < NV_50) {
                if (nvbo->tile_mode) {
                        if (dev_priv->chipset >= 0x40) {
                                *align = 65536;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x30) {
                                *align = 32768;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x20) {
                                *align = 16384;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x10) {
                                *align = 16384;
                                *size = roundup(*size, 32 * nvbo->tile_mode);
                        }
                }
        } else {
                *size = roundup(*size, (1 << nvbo->page_shift));
                *align = max((1 << nvbo->page_shift), *align);
        }

        *size = roundup(*size, PAGE_SIZE);
}

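/* Allocate a nouveau_bo and hand it to TTM.  Buffers backed by an
 * sg_table (PRIME imports) become ttm_bo_type_sg, everything else
 * ttm_bo_type_device.  If ttm_bo_init() fails it calls
 * nouveau_bo_del_ttm() itself, so there is nothing to unwind here.
 */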
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
               uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
               struct sg_table *sg,
               struct nouveau_bo **pnvbo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
        size_t acc_size;
        int ret;
        int type = ttm_bo_type_device;

        if (sg)
                type = ttm_bo_type_sg;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        INIT_LIST_HEAD(&nvbo->vma_list);
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;
        nvbo->bo.bdev = &dev_priv->ttm.bdev;

        nvbo->page_shift = 12;
        if (dev_priv->bar1_vm) {
                if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
                        nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
        }

        nouveau_bo_fixup_align(nvbo, flags, &align, &size);
        nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, flags, 0);

        acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
                                       sizeof(struct nouveau_bo));

        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          type, &nvbo->placement,
                          align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
                          nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }

        *pnvbo = nvbo;
        return 0;
}

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
        *n = 0;

        if (type & TTM_PL_FLAG_VRAM)
                pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
        if (type & TTM_PL_FLAG_TT)
                pl[(*n)++] = TTM_PL_FLAG_TT | flags;
        if (type & TTM_PL_FLAG_SYSTEM)
                pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

        if (dev_priv->card_type == NV_10 &&
            nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
            nvbo->bo.mem.num_pages < vram_pages / 4) {
                /*
                 * Make sure that the color and depth buffers are handled
                 * by independent memory controller units. Up to a 9x
                 * speed up when alpha-blending and depth-test are enabled
                 * at the same time.
                 */
                if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
                        nvbo->placement.fpfn = vram_pages / 2;
                        nvbo->placement.lpfn = ~0;
                } else {
                        nvbo->placement.fpfn = 0;
                        nvbo->placement.lpfn = vram_pages / 2;
                }
        }
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
        struct ttm_placement *pl = &nvbo->placement;
        uint32_t flags = TTM_PL_MASK_CACHING |
                (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

        pl->placement = nvbo->placements;
        set_placement_list(nvbo->placements, &pl->num_placement,
                           type, flags);

        pl->busy_placement = nvbo->busy_placements;
        set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
                           type | busy, flags);

        set_placement_range(nvbo, type);
}

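/* Pin a buffer into the requested memory type.  Pinning is refcounted;
 * only the first pin revalidates the buffer (now with
 * TTM_PL_FLAG_NO_EVICT set) and adjusts the aperture accounting.
 */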
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
                NV_ERROR(nouveau_bdev(bo->bdev)->dev,
                         "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                         1 << bo->mem.mem_type, memtype);
                return -EINVAL;
        }

        if (nvbo->pin_refcnt++)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                goto out;

        nouveau_bo_placement_set(nvbo, memtype, 0);

        ret = nouveau_bo_validate(nvbo, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free -= bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free -= bo->mem.size;
                        break;
                default:
                        break;
                }
        }
        ttm_bo_unreserve(bo);
out:
        if (unlikely(ret))
                nvbo->pin_refcnt--;
        return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (--nvbo->pin_refcnt)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                return ret;

        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

        ret = nouveau_bo_validate(nvbo, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free += bo->mem.size;
                        break;
                default:
                        break;
                }
        }

        ttm_bo_unreserve(bo);
        return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        if (nvbo)
                ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
                    bool no_wait_reserve, bool no_wait_gpu)
{
        int ret;

        ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
                              no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        return 0;
}

u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread16_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

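/* Instantiate the TTM backend matching the GART in use: the AGP backend
 * when running on an AGP bridge, otherwise nouveau's sgdma backend for
 * the PDMA/HW GART types.
 */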
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
                      unsigned long size, uint32_t page_flags,
                      struct page *dummy_read_page)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
        case NOUVEAU_GART_AGP:
                return ttm_agp_tt_create(bdev, dev->agp->bridge,
                                         size, page_flags, dummy_read_page);
#endif
        case NOUVEAU_GART_PDMA:
        case NOUVEAU_GART_HW:
                return nouveau_sgdma_create_ttm(bdev, size, page_flags,
                                                dummy_read_page);
        default:
                NV_ERROR(dev, "Unknown GART type %d\n",
                         dev_priv->gart_info.type);
                break;
        }

        return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                if (dev_priv->card_type >= NV_50) {
                        man->func = &nouveau_vram_manager;
                        man->io_reserve_fastpath = false;
                        man->use_io_reserve_lru = true;
                } else {
                        man->func = &ttm_bo_manager_func;
                }
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        case TTM_PL_TT:
                if (dev_priv->card_type >= NV_50)
                        man->func = &nouveau_gart_manager;
                else
                        man->func = &ttm_bo_manager_func;
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                                 TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
                        break;
                case NOUVEAU_GART_PDMA:
                case NOUVEAU_GART_HW:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                        break;
                default:
                        NV_ERROR(dev, "Unknown GART type: %d\n",
                                 dev_priv->gart_info.type);
                        return -EINVAL;
                }
                break;
        default:
                NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
                                         TTM_PL_FLAG_SYSTEM);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
                break;
        }

        *pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict,
                              bool no_wait_reserve, bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret)
                return ret;

        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
                                        no_wait_reserve, no_wait_gpu, new_mem);
        nouveau_fence_unref(&fence);
        return ret;
}

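/* NVC0: copy between the temporary transfer vmas with the M2MF engine,
 * moving up to 2047 page-sized lines per ring submission.
 */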
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *node = old_mem->mm_node;
        u64 src_offset = node->vma[0].offset;
        u64 dst_offset = node->vma[1].offset;
        u32 page_count = new_mem->num_pages;
        int ret;

        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 12);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
                OUT_RING  (chan, upper_32_bits(dst_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* line_length */
                OUT_RING  (chan, line_count);
                BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
                OUT_RING  (chan, 0x00100110);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct nouveau_mem *node = old_mem->mm_node;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 length = (new_mem->num_pages << PAGE_SHIFT);
        u64 src_offset = node->vma[0].offset;
        u64 dst_offset = node->vma[1].offset;
        int ret;

        while (length) {
                u32 amount, stride, height;

                amount = min(length, (u64)(4 * 1024 * 1024));
                stride = 16 * 4;
                height = amount / stride;

                if (new_mem->mem_type == TTM_PL_VRAM &&
                    nouveau_bo_tile_layout(nvbo)) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
                        OUT_RING  (chan, 1);
                }
                if (old_mem->mem_type == TTM_PL_VRAM &&
                    nouveau_bo_tile_layout(nvbo)) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
                        OUT_RING  (chan, 1);
                }

                ret = RING_SPACE(chan, 14);
                if (ret)
                        return ret;

                BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, upper_32_bits(dst_offset));
                BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, height);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                length -= amount;
                src_offset += amount;
                dst_offset += amount;
        }

        return 0;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
                      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
        if (mem->mem_type == TTM_PL_TT)
                return chan->gart_handle;
        return chan->vram_handle;
}

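/* NV04-NV4x: linear copy through NV_MEMORY_TO_MEMORY_FORMAT, selecting
 * the channel's GART or VRAM ctxdma handle for each side of the copy.
 */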
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        u32 src_offset = old_mem->start << PAGE_SHIFT;
        u32 dst_offset = new_mem->start << PAGE_SHIFT;
        u32 page_count = new_mem->num_pages;
        int ret;

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;

        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_RING(chan, NvSubM2MF,
                           NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING  (chan, src_offset);
                OUT_RING  (chan, dst_offset);
                OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* line_length */
                OUT_RING  (chan, line_count);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

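/* Create a temporary read-only vma in the channel's address space and
 * map the given memory region there so the copy engine can see it.
 */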
static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
                   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
        struct nouveau_mem *node = mem->mm_node;
        int ret;

        ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
                             node->page_shift, NV_MEM_ACCESS_RO, vma);
        if (ret)
                return ret;

        if (mem->mem_type == TTM_PL_VRAM)
                nouveau_vm_map(vma, node);
        else
                nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);

        return 0;
}

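/* Top-level engine copy: grabs the kernel channel, sets up the
 * temporary transfer vmas on NV50+, dispatches to the per-generation
 * copy routine, then fences the move.
 */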
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     bool no_wait_reserve, bool no_wait_gpu,
                     struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_channel *chan = dev_priv->channel;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);

        /* create temporary vmas for the transfer and attach them to the
         * old nouveau_mem node, these will get cleaned up after ttm has
         * destroyed the ttm_mem_reg
         */
        if (dev_priv->card_type >= NV_50) {
                struct nouveau_mem *node = old_mem->mm_node;

                ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
                if (ret)
                        goto out;

                ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
                if (ret)
                        goto out;
        }

        if (dev_priv->card_type < NV_50)
                ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        else
        if (dev_priv->card_type < NV_C0)
                ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        else
                ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        if (ret == 0) {
                ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
                                                    no_wait_reserve,
                                                    no_wait_gpu, new_mem);
        }

out:
        mutex_unlock(&chan->mutex);
        return ret;
}

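/* VRAM -> system moves can't be done directly; bounce the buffer
 * through a GART placement first and let TTM finish the move.
 */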
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}

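/* The inverse of nouveau_bo_move_flipd(): system -> VRAM also bounces
 * through GART, with the TTM-side move done first.
 */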
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);

out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}

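/* Keep every vma attached to the bo coherent with its new backing
 * store: re-map for VRAM (and small-page TT) placements, unmap
 * otherwise.
 */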
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_vma *vma;

        /* ttm can now (stupidly) pass the driver bos it didn't create... */
        if (bo->destroy != nouveau_bo_del_ttm)
                return;

        list_for_each_entry(vma, &nvbo->vma_list, head) {
                if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
                        nouveau_vm_map(vma, new_mem->mm_node);
                } else
                if (new_mem && new_mem->mem_type == TTM_PL_TT &&
                    nvbo->page_shift == vma->vm->spg_shift) {
                        if (((struct nouveau_mem *)new_mem->mm_node)->sg)
                                nouveau_vm_map_sg_table(vma, 0,
                                        new_mem->num_pages << PAGE_SHIFT,
                                        new_mem->mm_node);
                        else
                                nouveau_vm_map_sg(vma, 0,
                                        new_mem->num_pages << PAGE_SHIFT,
                                        new_mem->mm_node);
                } else {
                        nouveau_vm_unmap(vma);
                }
        }
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                   struct nouveau_tile_reg **new_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 offset = new_mem->start << PAGE_SHIFT;

        *new_tile = NULL;
        if (new_mem->mem_type != TTM_PL_VRAM)
                return 0;

        if (dev_priv->card_type >= NV_10) {
                *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
                                                nvbo->tile_mode,
                                                nvbo->tile_flags);
        }

        return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
                      struct nouveau_tile_reg *new_tile,
                      struct nouveau_tile_reg **old_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;

        nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
        *old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait_reserve, bool no_wait_gpu,
                struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_tile_reg *new_tile = NULL;
        int ret = 0;

        if (dev_priv->card_type < NV_50) {
                ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
                if (ret)
                        return ret;
        }

        /* Fake bo copy. */
        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                bo->mem = *new_mem;
                new_mem->mm_node = NULL;
                goto out;
        }

        /* Software copy if the card isn't up and running yet. */
        if (!dev_priv->channel) {
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
                goto out;
        }

        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else
                ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

        if (!ret)
                goto out;

        /* Fallback to software copy. */
        ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
        if (dev_priv->card_type < NV_50) {
                if (ret)
                        nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
                else
                        nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
        }

        return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

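/* Tell TTM how the CPU reaches a memory region.  VRAM on cards with a
 * BAR1 vm gets a temporary BAR1 mapping; AGP and linear VRAM are plain
 * offsets from their aperture base.
 */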
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;
        int ret;

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                return 0;
        case TTM_PL_TT:
#if __OS_HAS_AGP
                if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = dev_priv->gart_info.aper_base;
                        mem->bus.is_iomem = true;
                }
#endif
                break;
        case TTM_PL_VRAM:
        {
                struct nouveau_mem *node = mem->mm_node;
                u8 page_shift;

                if (!dev_priv->bar1_vm) {
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = pci_resource_start(dev->pdev, 1);
                        mem->bus.is_iomem = true;
                        break;
                }

                if (dev_priv->card_type >= NV_C0)
                        page_shift = node->page_shift;
                else
                        page_shift = 12;

                ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
                                     page_shift, NV_MEM_ACCESS_RW,
                                     &node->bar_vma);
                if (ret)
                        return ret;

                nouveau_vm_map(&node->bar_vma, node);

                mem->bus.offset = node->bar_vma.offset;
                if (dev_priv->card_type == NV_50) /*XXX*/
                        mem->bus.offset -= 0x0020000000ULL;
                mem->bus.base = pci_resource_start(dev->pdev, 1);
                mem->bus.is_iomem = true;
        }
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct nouveau_mem *node = mem->mm_node;

        if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
                return;

        if (!node->bar_vma.node)
                return;

        nouveau_vm_unmap(&node->bar_vma);
        nouveau_vm_put(&node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        /* as long as the bo isn't in vram, and isn't tiled, we've got
         * nothing to do here.
         */
        if (bo->mem.mem_type != TTM_PL_VRAM) {
                if (dev_priv->card_type < NV_50 ||
                    !nouveau_bo_tile_layout(nvbo))
                        return 0;
        }

        /* make sure bo is in mappable vram */
        if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
                return 0;

        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
        nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
        return nouveau_bo_validate(nvbo, false, true, false);
}

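/* Install a fence as the bo's sync object, dropping the reference held
 * on whatever fence was attached before.
 */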
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
        struct nouveau_fence *old_fence;

        if (likely(fence))
                nouveau_fence_ref(fence);

        spin_lock(&nvbo->bo.bdev->fence_lock);
        old_fence = nvbo->bo.sync_obj;
        nvbo->bo.sync_obj = fence;
        spin_unlock(&nvbo->bo.bdev->fence_lock);

        nouveau_fence_unref(&old_fence);
}

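/* Allocate and DMA-map the backing pages of a ttm_tt.  sg-backed
 * (PRIME) objects only need their page/dma arrays filled in; otherwise
 * defer to the AGP or swiotlb pools when they apply, or map each page
 * through the PCI DMA API by hand.
 */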
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
        struct ttm_dma_tt *ttm_dma = (void *)ttm;
        struct drm_nouveau_private *dev_priv;
        struct drm_device *dev;
        unsigned i;
        int r;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (ttm->state != tt_unpopulated)
                return 0;

        if (slave && ttm->sg) {
                /* make userspace faulting work */
                drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                                 ttm_dma->dma_address, ttm->num_pages);
                ttm->state = tt_unbound;
                return 0;
        }

        dev_priv = nouveau_bdev(ttm->bdev);
        dev = dev_priv->dev;

#if __OS_HAS_AGP
        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                return ttm_agp_tt_populate(ttm);
        }
#endif

#ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
                return ttm_dma_populate((void *)ttm, dev->dev);
        }
#endif

        r = ttm_pool_populate(ttm);
        if (r) {
                return r;
        }

        for (i = 0; i < ttm->num_pages; i++) {
                ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
                                                       0, PAGE_SIZE,
                                                       PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
                        /* unmap pages [0, i), including page 0 */
                        while (i--) {
                                pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                                ttm_dma->dma_address[i] = 0;
                        }
                        ttm_pool_unpopulate(ttm);
                        return -EFAULT;
                }
        }
        return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        struct ttm_dma_tt *ttm_dma = (void *)ttm;
        struct drm_nouveau_private *dev_priv;
        struct drm_device *dev;
        unsigned i;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (slave)
                return;

        dev_priv = nouveau_bdev(ttm->bdev);
        dev = dev_priv->dev;

#if __OS_HAS_AGP
        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                ttm_agp_tt_unpopulate(ttm);
                return;
        }
#endif

#ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
                ttm_dma_unpopulate((void *)ttm, dev->dev);
                return;
        }
#endif

        for (i = 0; i < ttm->num_pages; i++) {
                if (ttm_dma->dma_address[i]) {
                        pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
        }

        ttm_pool_unpopulate(ttm);
}

struct ttm_bo_driver nouveau_bo_driver = {
        .ttm_tt_create = &nouveau_ttm_tt_create,
        .ttm_tt_populate = &nouveau_ttm_tt_populate,
        .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move_notify = nouveau_bo_move_ntfy,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .sync_obj_signaled = __nouveau_fence_signalled,
        .sync_obj_wait = __nouveau_fence_wait,
        .sync_obj_flush = __nouveau_fence_flush,
        .sync_obj_unref = __nouveau_fence_unref,
        .sync_obj_ref = __nouveau_fence_ref,
        .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
        .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
        .io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
        struct nouveau_vma *vma;
        list_for_each_entry(vma, &nvbo->vma_list, head) {
                if (vma->vm == vm)
                        return vma;
        }

        return NULL;
}

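/* Allocate address space for the bo in the given vm and map its current
 * backing store there; the resulting vma is tracked on the bo's
 * vma_list with an initial refcount of one.
 */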
int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
                   struct nouveau_vma *vma)
{
        const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
        struct nouveau_mem *node = nvbo->bo.mem.mm_node;
        int ret;

        ret = nouveau_vm_get(vm, size, nvbo->page_shift,
                             NV_MEM_ACCESS_RW, vma);
        if (ret)
                return ret;

        if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
                nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
        else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
                if (node->sg)
                        nouveau_vm_map_sg_table(vma, 0, size, node);
                else
                        nouveau_vm_map_sg(vma, 0, size, node);
        }

        list_add_tail(&vma->head, &nvbo->vma_list);
        vma->refcount = 1;
        return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
        if (vma->node) {
                if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
                        spin_lock(&nvbo->bo.bdev->fence_lock);
                        ttm_bo_wait(&nvbo->bo, false, false, false);
                        spin_unlock(&nvbo->bo.bdev->fence_lock);
                        nouveau_vm_unmap(vma);
                }

                nouveau_vm_put(vma);
                list_del(&vma->head);
        }
}