/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

struct ttm_transfer_obj {
        struct ttm_buffer_object base;
        struct ttm_buffer_object *bo;
};

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    struct ttm_operation_ctx *ctx,
                    struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                pr_err("Failed to expire sync object before unbinding TTM\n");
                        return ret;
                }

                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem, ctx);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
        if (likely(man->io_reserve_fastpath))
                return 0;

        if (interruptible)
                return mutex_lock_interruptible(&man->io_reserve_mutex);

        mutex_lock(&man->io_reserve_mutex);
        return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
        if (likely(man->io_reserve_fastpath))
                return;

        mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;

        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
                return -EAGAIN;

        bo = list_first_entry(&man->io_reserve_lru,
                              struct ttm_buffer_object,
                              io_reserve_lru);
        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);

        return 0;
}


int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;
        if (likely(man->io_reserve_fastpath))
                return bdev->driver->io_mem_reserve(bdev, mem);

        if (bdev->driver->io_mem_reserve &&
            mem->bus.io_reserved_count++ == 0) {
retry:
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (ret == -EAGAIN) {
                        ret = ttm_mem_io_evict(man);
                        if (ret == 0)
                                goto retry;
                }
        }
        return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (likely(man->io_reserve_fastpath))
                return;

        if (bdev->driver->io_mem_reserve &&
            --mem->bus.io_reserved_count == 0 &&
            bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;
        int ret;

        if (!mem->bus.io_reserved_vm) {
                struct ttm_mem_type_manager *man =
                        &bo->bdev->man[mem->mem_type];

                ret = ttm_mem_io_reserve(bo->bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
                mem->bus.io_reserved_vm = true;
                if (man->use_io_reserve_lru)
                        list_add_tail(&bo->io_reserve_lru,
                                      &man->io_reserve_lru);
        }
        return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (mem->bus.io_reserved_vm) {
                mem->bus.io_reserved_vm = false;
                list_del_init(&bo->io_reserve_lru);
                ttm_mem_io_free(bo->bdev, mem);
        }
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                               void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;
        void *addr;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
                else
                        addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                                void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

#ifdef CONFIG_X86
#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
#else
#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
#endif


/**
 * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
 * specified page protection.
 *
 * @page: The page to map.
 * @prot: The page protection.
 *
 * This function maps a TTM page using the kmap_atomic api if available,
 * otherwise falls back to vmap. The user must make sure that the
 * specified page does not have an aliased mapping with a different caching
 * policy unless the architecture explicitly allows it. Also mapping and
 * unmapping using this api must be correctly nested. Unmapping should
 * occur in the reverse order of mapping.
 */
void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
{
        if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
                return kmap_atomic(page);
        else
                return __ttm_kmap_atomic_prot(page, prot);
}
EXPORT_SYMBOL(ttm_kmap_atomic_prot);

/**
 * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
 * ttm_kmap_atomic_prot.
 *
 * @addr: The virtual address from the map.
 * @prot: The page protection.
 */
void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
{
        if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
                kunmap_atomic(addr);
        else
                __ttm_kunmap_atomic(addr);
}
EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
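
/*
 * Usage sketch (illustrative only; "mem", "dst_page" and "src_page" are
 * hypothetical caller-side names): the kernel-doc above requires maps made
 * through this API to be unmapped in the reverse order of mapping. A caller
 * copying one page into memory with a non-default caching policy might do:
 *
 *	pgprot_t prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
 *	void *dst = ttm_kmap_atomic_prot(dst_page, prot);
 *	void *src = ttm_kmap_atomic_prot(src_page, PAGE_KERNEL);
 *
 *	memcpy(dst, src, PAGE_SIZE);
 *
 *	ttm_kunmap_atomic_prot(src, PAGE_KERNEL);
 *	ttm_kunmap_atomic_prot(dst, prot);
 *
 * ttm_copy_io_ttm_page() and ttm_copy_ttm_io_page() below follow the same
 * single-page pattern.
 */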

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
        dst = ttm_kmap_atomic_prot(d, prot);
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

        ttm_kunmap_atomic_prot(dst, prot);

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
        src = ttm_kmap_atomic_prot(s, prot);
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

        ttm_kunmap_atomic_prot(src, prot);

        return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        /*
         * Single TTM move. NOP.
         */
        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;

        /*
         * Don't move nonexistent data. Clear destination instead.
         */
        if (old_iomap == NULL &&
            (ttm == NULL || (ttm->state == tt_unpopulated &&
                             !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
                memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
                goto out2;
        }

        /*
         * TTM might be null for moves within the same region.
         */
        if (ttm) {
                ret = ttm_tt_populate(ttm, ctx);
                if (ret)
                        goto out1;
        }

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else {
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                }
                if (ret)
                        goto out1;
        }
        mb();
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

        /*
         * On error, keep the mm node!
         */
        if (!ret)
                ttm_bo_mem_put(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        struct ttm_transfer_obj *fbo;

        fbo = container_of(bo, struct ttm_transfer_obj, base);
        ttm_bo_put(fbo->bo);
        kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_transfer_obj *fbo;
        int ret;

        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        fbo->base = *bo;
        fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;

        ttm_bo_get(bo);
        fbo->bo = bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        atomic_inc(&bo->bdev->glob->bo_count);
        INIT_LIST_HEAD(&fbo->base.ddestroy);
        INIT_LIST_HEAD(&fbo->base.lru);
        INIT_LIST_HEAD(&fbo->base.swap);
        INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
        mutex_init(&fbo->base.wu_mutex);
        fbo->base.moving = NULL;
        drm_vma_node_reset(&fbo->base.vma_node);
        atomic_set(&fbo->base.cpu_writers, 0);

        kref_init(&fbo->base.list_kref);
        kref_init(&fbo->base.kref);
        fbo->base.destroy = &ttm_transfered_destroy;
        fbo->base.acc_size = 0;
        fbo->base.resv = &fbo->base.ttm_resv;
        reservation_object_init(fbo->base.resv);
        ret = reservation_object_trylock(fbo->base.resv);
        WARN_ON(!ret);

        *new_obj = &fbo->base;
        return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
        /* Cached mappings need no adjustment */
        if (caching_flags & TTM_PL_FLAG_CACHED)
                return tmp;

#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                       size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        struct ttm_tt *ttm = bo->ttm;
        pgprot_t prot;
        int ret;

        BUG_ON(!ttm);

        ret = ttm_tt_populate(ttm, &ctx);
        if (ret)
                return ret;

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        unsigned long offset, size;
        int ret;

        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;

        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
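
/*
 * Usage sketch (illustrative only; "bo" is a reserved buffer object provided
 * by the caller): a driver wanting a temporary CPU view of a buffer object
 * typically brackets its access with ttm_bo_kmap()/ttm_bo_kunmap() and checks
 * whether the returned mapping is I/O memory:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret)
 *		return ret;
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	(access "virtual", using memcpy_toio()/memset_io() when is_iomem is true)
 *	ttm_bo_kunmap(&map);
 */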

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct dma_fence *fence,
                              bool evict,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;

        reservation_object_add_excl_fence(bo->resv, fence);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false);
                if (ret)
                        return ret;

                if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                reservation_object_add_excl_fence(ghost_obj->resv, fence);

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_put(ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
                         struct dma_fence *fence, bool evict,
                         struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg *old_mem = &bo->mem;

        struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
        struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

        int ret;

        reservation_object_add_excl_fence(bo->resv, fence);

        if (!evict) {
                struct ttm_buffer_object *ghost_obj;

                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                reservation_object_add_excl_fence(ghost_obj->resv, fence);

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_put(ghost_obj);

        } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

                /**
                 * BO doesn't have a TTM we need to bind/unbind. Just remember
                 * this eviction and free up the allocation
                 */

                spin_lock(&from->move_lock);
                if (!from->move || dma_fence_is_later(fence, from->move)) {
                        dma_fence_put(from->move);
                        from->move = dma_fence_get(fence);
                }
                spin_unlock(&from->move_lock);

                ttm_bo_free_old_node(bo);

                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

        } else {
                /**
                 * Last resort, wait for the move to be completed.
                 *
                 * Should never happen in practice.
                 */

                ret = ttm_bo_wait(bo, false, false);
                if (ret)
                        return ret;

                if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);

int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
        struct ttm_buffer_object *ghost;
        int ret;

        ret = ttm_buffer_object_transfer(bo, &ghost);
        if (ret)
                return ret;

        ret = reservation_object_copy_fences(ghost->resv, bo->resv);
        /* Last resort, wait for the BO to be idle when we are OOM */
        if (ret)
                ttm_bo_wait(bo, false, false);

        memset(&bo->mem, 0, sizeof(bo->mem));
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->ttm = NULL;

        ttm_bo_unreserve(ghost);
        ttm_bo_put(ghost);

        return 0;
}