/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs <darktama@iinet.net.au>
 *	    Jeremy Kolb <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>
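/*
 * TTM destroy callback, invoked once the last reference to the buffer
 * object is dropped.  Releases the NV10-style tile region and the channel
 * VM allocation before freeing the wrapper structure itself.
 */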
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	nouveau_vm_put(&nvbo->vma);
	kfree(nvbo);
}

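/*
 * Fix up buffer size and alignment for the hardware's requirements.  On
 * pre-NV50 chips a tiled buffer must be rounded up to a multiple of the
 * tile row size and aligned to a chipset-dependent boundary; on NV50+ the
 * size is instead rounded to the VM page size chosen for the buffer
 * (large pages for buffers over 256KiB when a channel VM exists, 4KiB
 * pages otherwise).
 */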
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
		       int *page_shift)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		if (likely(dev_priv->chan_vm)) {
			if (*size > 256 * 1024)
				*page_shift = dev_priv->chan_vm->lpg_shift;
			else
				*page_shift = dev_priv->chan_vm->spg_shift;
		} else {
			*page_shift = 12;
		}

		*size = roundup(*size, (1 << *page_shift));
		*align = max((1 << *page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret = 0, page_shift = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
	align >>= PAGE_SHIFT;

	if (!nvbo->no_vm && dev_priv->chan_vm) {
		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
				     NV_MEM_ACCESS_RW, &nvbo->vma);
		if (ret) {
			kfree(nvbo);
			return ret;
		}
	}

	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}
	nvbo->channel = NULL;

	if (nvbo->vma.node) {
		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
			nvbo->bo.offset = nvbo->vma.offset;
	}

	*pnvbo = nvbo;
	return 0;
}

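/*
 * Illustrative sketch only (not part of the original file): a typical
 * in-driver sequence allocating, pinning, and mapping a buffer with the
 * helpers in this file.  Error handling is elided, and the 64KiB size,
 * zero tile parameters, and VRAM placement are assumptions made for the
 * example, not values taken from any real caller.
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret;
 *
 *	ret = nouveau_bo_new(dev, NULL, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *			     0, 0x0000, false, true, &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	if (ret == 0)
 *		ret = nouveau_bo_map(nvbo);
 *	if (ret == 0) {
 *		nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 *		nouveau_bo_unmap(nvbo);
 *	}
 */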
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 2) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

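/*
 * Build the TTM placement lists for a buffer: "placement" holds the
 * preferred domains, while "busy_placement" additionally allows the
 * domains in "busy" when the preferred ones are contended.  A pinned
 * buffer gets TTM_PL_FLAG_NO_EVICT folded in so TTM never migrates it.
 */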
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

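/*
 * Pin a buffer into the requested memory type.  Pinning is refcounted;
 * only the first pin actually validates the buffer into place (with
 * NO_EVICT set via the placement flags) and adjusts the aperture-free
 * accounting.  Pinning a buffer already pinned into a different memory
 * type fails with -EINVAL.
 */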
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	if (nvbo->vma.node) {
		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
			nvbo->bo.offset = nvbo->vma.offset;
	}

	return 0;
}

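/*
 * 16- and 32-bit accessors for a kmap'd buffer.  ttm_kmap_obj_virtual()
 * reports whether the mapping is I/O memory, so reads and writes go
 * through the io*_native() helpers or plain pointer dereferences as
 * appropriate.
 */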
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_PDMA:
	case NOUVEAU_GART_HW:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_PDMA:
		case NOUVEAU_GART_HW:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			man->gpu_offset = dev_priv->gart_info.aper_base;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

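/*
 * Three generations of the M2MF copy path live below.  nv04 issues simple
 * pitch-linear transfers (at most 2047 lines of PAGE_SIZE bytes per
 * burst) through the ctxdma handles picked by nouveau_bo_mem_ctxdma();
 * nv50 adds 64-bit offsets plus source/destination tiling setup; nvc0
 * programs the same kind of copy using the Fermi method layout
 * (BEGIN_NVC0).  All are driven through the channel's pushbuffer via
 * RING_SPACE()/OUT_RING().
 */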
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (nvbo->no_vm) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 src_offset = old_mem->start << PAGE_SHIFT;
	u64 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	if (!nvbo->no_vm) {
		if (old_mem->mem_type == TTM_PL_VRAM)
			src_offset = nvbo->vma.offset;
		else
			src_offset += dev_priv->gart_info.aper_base;

		if (new_mem->mem_type == TTM_PL_VRAM)
			dst_offset = nvbo->vma.offset;
		else
			dst_offset += dev_priv->gart_info.aper_base;
	}

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
		OUT_RING (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset, dst_offset;
	int ret;

	src_offset = old_mem->start << PAGE_SHIFT;
	dst_offset = new_mem->start << PAGE_SHIFT;
	if (!nvbo->no_vm) {
		if (old_mem->mem_type == TTM_PL_VRAM)
			src_offset = nvbo->vma.offset;
		else
			src_offset += dev_priv->gart_info.aper_base;

		if (new_mem->mem_type == TTM_PL_VRAM)
			dst_offset = nvbo->vma.offset;
		else
			dst_offset += dev_priv->gart_info.aper_base;
	}

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (length) {
		u32 amount, stride, height;

		amount = min(length, (u64)(4 * 1024 * 1024));
		stride = 16 * 4;
		height = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->no_vm) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
	if (dev_priv->card_type < NV_C0)
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}

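/*
 * M2MF can only reach VRAM and GART (TTM_PL_{VRAM,TT}).  Moves to or from
 * TTM_PL_SYSTEM therefore bounce through a temporary GART allocation:
 * "flipd" copies into the bounce buffer with the GPU and lets TTM finish
 * the move to system memory, while "flips" lets TTM bind the pages into
 * GART first and then copies onward with the GPU.
 */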
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->start << PAGE_SHIFT;

	if (dev_priv->chan_vm) {
		nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
		*old_tile = new_tile;
	}
}

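/*
 * Top-level TTM move callback.  The cheap cases come first: a "fake" copy
 * when the BO has no backing pages yet, and a CPU memcpy before the
 * acceleration channel is up.  Otherwise the move is done with the M2MF
 * engine (bouncing through GART for system memory) and falls back to
 * memcpy if the hardware path fails.
 */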
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

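/*
 * Fill in mem->bus so TTM knows where (and whether) a buffer can be
 * CPU-mapped.  On boards with a BAR1 VM (NV50+), VRAM pages are mapped
 * through BAR1 on demand here and torn down again in
 * nouveau_ttm_io_mem_free().
 */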
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
	{
		struct nouveau_vram *vram = mem->mm_node;
		u8 page_shift;

		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;
			break;
		}

		if (dev_priv->card_type == NV_C0)
			page_shift = vram->page_shift;
		else
			page_shift = 12;

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
				     page_shift, NV_MEM_ACCESS_RW,
				     &vram->bar_vma);
		if (ret)
			return ret;

		nouveau_vm_map(&vram->bar_vma, vram);
		if (ret) {
			nouveau_vm_put(&vram->bar_vma);
			return ret;
		}

		mem->bus.offset = vram->bar_vma.offset;
		if (dev_priv->card_type == NV_50) /*XXX*/
			mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
	}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_vram *vram = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (!vram->bar_vma.node)
		return;

	nouveau_vm_unmap(&vram->bar_vma);
	nouveau_vm_put(&vram->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;


	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

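/*
 * Attach a fence to the buffer as its TTM sync object, taking a reference
 * on the new fence under fence_lock and dropping the reference to the old
 * one after the lock is released.
 */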
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};