drivers/gpu/drm/ttm/ttm_bo_util.c
drm/ttm: remove move to new and inline into remaining place.

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
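
/*
 * A "ghost" buffer object used by ttm_buffer_object_transfer(): it wraps a
 * copy of the original BO and holds a reference on it until the transfer has
 * completed and the ghost is destroyed.
 */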
struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};
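
/*
 * Set up the I/O aperture mapping for @mem via the driver's io_mem_reserve
 * callback. This is a no-op if the resource already carries a bus address or
 * offset, or if the driver does not implement the callback.
 */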
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_resource *mem)
{
	if (mem->bus.offset || mem->bus.addr)
		return 0;

	mem->bus.is_iomem = false;
	if (!bdev->driver->io_mem_reserve)
		return 0;

	return bdev->driver->io_mem_reserve(bdev, mem);
}
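
/*
 * Release an I/O aperture mapping previously set up with ttm_mem_io_reserve()
 * and clear the cached bus address and offset.
 */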
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_resource *mem)
{
	if (!mem->bus.offset && !mem->bus.addr)
		return;

	if (bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}
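
/*
 * Map a whole resource for a CPU copy. System-memory resources are left to
 * the caller (*virtual stays NULL), premapped apertures are used directly,
 * and everything else is ioremap()ed with the caching mode requested by the
 * resource.
 */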
static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
				struct ttm_resource *mem,
				void **virtual)
{
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

		if (mem->bus.caching == ttm_write_combined)
			addr = ioremap_wc(mem->bus.offset, bus_size);
		else
			addr = ioremap(mem->bus.offset, bus_size);
		if (!addr) {
			ttm_mem_io_free(bdev, mem);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
				 struct ttm_resource *mem,
				 void *virtual)
{
	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	ttm_mem_io_free(bdev, mem);
}
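
/*
 * Page-wise copy helpers used by ttm_bo_move_memcpy(): I/O to I/O, I/O to a
 * TTM page, and a TTM page to I/O. The kmap_atomic_prot() variants map the
 * system page with the page protection expected for the buffer object.
 */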
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = kmap_atomic_prot(d, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	kunmap_atomic(dst);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = kmap_atomic_prot(s, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	kunmap_atomic(src);

	return 0;
}
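
/*
 * Fallback move done by the CPU: map both placements, copy (or clear) page by
 * page, then assign the new resource to the BO. Drivers typically fall back
 * to this when no hardware copy engine is available, e.g. (illustrative
 * sketch only, drv_have_copy_engine() is a hypothetical driver helper):
 *
 *	if (!drv_have_copy_engine(bo->bdev))
 *		return ttm_bo_move_memcpy(bo, ctx, new_mem);
 */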
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource *old_mem = &bo->mem;
	struct ttm_resource old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		return ret;

	ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm) {
		ret = ttm_tt_populate(bdev, ttm, ctx);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(bo, old_mem, PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(bo, new_mem, PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else {
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		}
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;

	ttm_bo_assign_mem(bo, new_mem);

	if (!man->use_tt)
		ttm_bo_tt_destroy(bo);

out1:
	ttm_resource_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_resource_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_resource_free(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
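
/*
 * Destroy callback for the ghost BO created by ttm_buffer_object_transfer():
 * drop the reference on the original BO and free the wrapper.
 */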
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;

	ttm_bo_get(bo);
	fbo->bo = bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_bo_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	INIT_LIST_HEAD(&fbo->base.lru);
	INIT_LIST_HEAD(&fbo->base.swap);
	fbo->base.moving = NULL;
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.acc_size = 0;
	fbo->base.pin_count = 1;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	*new_obj = &fbo->base;
	return 0;
}
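
/*
 * Derive the page protection for CPU mappings of @res from the base
 * protection @tmp, honouring the caching mode of either the TTM (for
 * placements backed by system memory) or the bus (for I/O placements).
 */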
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	struct ttm_resource_manager *man;
	enum ttm_caching caching;

	man = ttm_manager_type(bo->bdev, res->mem_type);
	caching = man->use_tt ? bo->ttm->caching : res->bus.caching;

	/* Cached mappings need no adjustment */
	if (caching == ttm_cached)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching == ttm_write_combined)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__) || defined(__mips__)
	if (caching == ttm_write_combined)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
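
/*
 * kmap helper for I/O placements: reuse the premapped bus address when the
 * driver provides one, otherwise ioremap the requested range with the
 * caching mode of the resource.
 */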
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap(bo->mem.bus.offset + offset,
					       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
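
/*
 * kmap helper for system-memory placements: populate the TTM first, then
 * either kmap a single cached page or vmap the whole range with the
 * protection returned by ttm_io_prot().
 */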
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = &bo->mem;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
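
/*
 * Map @num_pages of @bo starting at @start_page into kernel address space,
 * dispatching to ttm_bo_ioremap() or ttm_bo_kmap_ttm() depending on whether
 * the current placement is I/O memory. A caller pairs it with
 * ttm_bo_kunmap(), roughly (illustrative sketch only):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (!ret) {
 *		void *virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		... access virt, using the _toio/_fromio helpers if is_iomem ...
 *		ttm_bo_kunmap(&map);
 *	}
 */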
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
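
/*
 * Synchronous cleanup path for ttm_bo_move_accel_cleanup(): wait for the BO
 * to become idle, destroy its TTM if the destination does not use one, and
 * free the old resource.
 */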
static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	int ret;
	ret = ttm_bo_wait(bo, false, false);
	if (ret)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->mem);
	return 0;
}
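
/*
 * Pipelined cleanup path for ordinary moves: hand the old placement over to a
 * ghost BO fenced with @fence, so it is released only once the copy engine is
 * done, while the original BO can be used with the new placement right away.
 */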
static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/**
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */

	dma_fence_put(bo->moving);
	bo->moving = dma_fence_get(fence);

	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

	/**
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */

	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	ttm_bo_put(ghost_obj);
	return 0;
}
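
/*
 * Pipelined cleanup path for evictions from fixed memory: there is no TTM to
 * unbind, so just remember @fence in the source resource manager and free the
 * old resource; new allocations from that manager can then wait for the
 * eviction to finish before reusing the space.
 */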
static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);

	/**
	 * BO doesn't have a TTM we need to bind/unbind. Just remember
	 * this eviction and free up the allocation
	 */
	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		dma_fence_put(from->move);
		from->move = dma_fence_get(fence);
	}
	spin_unlock(&from->move_lock);

	ttm_resource_free(bo, &bo->mem);

	dma_fence_put(bo->moving);
	bo->moving = dma_fence_get(fence);
}
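
/*
 * Common tail for driver moves queued on a copy engine: attach @fence to the
 * BO's reservation object, pick the appropriate cleanup strategy (ghost
 * object, pipelined eviction or synchronous wait) and assign the new resource
 * to the BO.
 */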
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_excl_fence(bo->base.resv, fence);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
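
/*
 * Turn a BO into an empty system-memory placement without waiting for it to
 * become idle: the current placement and its fences are handed over to a
 * ghost BO, which releases them once the last fence has signalled.
 */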
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	int ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		return ret;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	memset(&bo->mem, 0, sizeof(bo->mem));
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->ttm = NULL;

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);

	return 0;
}