/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#include <linux/log2.h>
#include <linux/slab.h>

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

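/*
 * Round the requested size and alignment up for tiled buffers.  On
 * pre-NV50 chips the tiling hardware imposes chipset-specific alignment
 * constraints, handled case by case below; on NV50 everything is padded
 * to 64KiB granularity instead.
 */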
static void
nouveau_bo_fixup_align(struct drm_device *dev,
		       uint32_t tile_mode, uint32_t tile_flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->card_type < NV_50) {
		if (tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * tile_mode);
			}
		}
	}

	/* ALIGN works only on powers of two. */
	*size = roundup(*size, PAGE_SIZE);
	if (dev_priv->card_type == NV_50) {
		*size = roundup(*size, 65536);
		*align = max(65536, *align);
	}
}

int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
			       &align, &size);
	align >>= PAGE_SHIFT;

	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}
	nvbo->channel = NULL;

	*pnvbo = nvbo;
	return 0;
}

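/*
 * Illustrative (hypothetical) use of the allocator above, e.g. for a
 * pinned scratch buffer in VRAM; error handling is elided and the flag
 * values are only examples:
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	ret = nouveau_bo_new(dev, NULL, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *			     0, 0x0000, true, true, &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 */
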
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

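/*
 * Pinning is reference counted: the first pin validates the buffer into
 * the requested memory type and makes it non-evictable (placement_set
 * adds TTM_PL_FLAG_NO_EVICT once pin_refcnt is non-zero); later calls
 * only bump the refcount, and must ask for the type already pinned to.
 */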
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

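/*
 * The rd/wr accessors below require a prior nouveau_bo_map(): they go
 * through ioread/iowrite when the kmap landed in I/O memory (e.g.
 * write-combined VRAM) and through a plain pointer dereference for
 * cacheable system memory.
 */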
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type == NV_50)
			man->func = &nouveau_vram_manager;
		else
			man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		if (dev_priv->card_type == NV_50)
			man->gpu_offset = 0x40000000;
		else
			man->gpu_offset = 0;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (nvbo->no_vm) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

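/*
 * NV50 M2MF copy: the transfer is split into chunks of at most 4MiB
 * (with a 64-byte stride, so height = amount / stride), and linear vs
 * tiled addressing is programmed per chunk for both source and
 * destination depending on whether the bo has a tiled VRAM layout.
 */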
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset, dst_offset;
	int ret;

	src_offset = old_mem->start << PAGE_SHIFT;
	dst_offset = new_mem->start << PAGE_SHIFT;
	if (!nvbo->no_vm) {
		if (old_mem->mem_type == TTM_PL_VRAM)
			src_offset += dev_priv->vm_vram_base;
		else
			src_offset += dev_priv->vm_gart_base;

		if (new_mem->mem_type == TTM_PL_VRAM)
			dst_offset += dev_priv->vm_vram_base;
		else
			dst_offset += dev_priv->vm_gart_base;
	}

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (length) {
		u32 amount, stride, height;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->no_vm) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}

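/*
 * Moves between VRAM and system memory cannot be done by M2MF directly,
 * so the two helpers below bounce through a GART (TTM_PL_TT) placement:
 * "flipd" copies VRAM->GART with the hardware and then flips the ttm to
 * system memory, while "flips" does the reverse for system->VRAM.
 */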
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;
	int ret;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->start << PAGE_SHIFT;

	if (dev_priv->card_type == NV_50) {
		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size,
					      nouveau_bo_tile_layout(nvbo),
					      offset);
		if (ret)
			return ret;

	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
		*old_tile = new_tile;
	}
}

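/*
 * Main TTM move hook.  Tiling state is (re)bound first, then the copy
 * itself uses the cheapest method available: a no-op "fake" copy for
 * unpopulated system-memory bos, a CPU memcpy while the card is not up
 * yet, a hardware M2MF copy otherwise, and memcpy again as the fallback
 * if the accelerated path fails.
 */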
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

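/*
 * Tell TTM how each memory type is reached for CPU access: system
 * memory needs no bus window, while AGP GART and VRAM are exposed as
 * I/O ranges relative to the GART aperture base and BAR1 respectively.
 */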
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

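/*
 * Attach a new fence to the bo, taking a reference on it and dropping
 * the reference on whatever fence it replaces; bdev->fence_lock guards
 * the sync_obj pointer swap.
 */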
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};