drivers/gpu/drm/ttm/ttm_bo_util.c
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

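/*
 * Utilities for buffer object moves: a CPU memcpy fallback path,
 * cleanup after accelerated (GPU) moves, io space reservation and
 * CPU (un)mapping of buffer object contents.
 */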
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

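/*
 * ttm_bo_move_ttm - move a TTM-backed buffer by rebinding its TTM
 *
 * Frees any old non-system placement and flags the buffer as placed
 * in system memory, switches the TTM's caching state to match the new
 * placement, and binds the TTM to the new region unless that region
 * is system memory. On success the buffer's mem_reg is updated and
 * ownership of new_mem's mm_node is taken over.
 */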
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

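/*
 * io_reserve locking: when a memory type manager runs with
 * io_reserve_fastpath set, io space reservations never need to be
 * evicted and these lock/unlock helpers are no-ops. Otherwise the
 * io_reserve_mutex guards the per-region reserve counts and the
 * io_reserve_lru list below.
 */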
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

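/*
 * ttm_mem_io_evict - reclaim one io space reservation
 *
 * Called with the io_reserve_mutex held when the driver's
 * io_mem_reserve callback reports -EAGAIN: unmaps the least recently
 * used buffer on the io_reserve_lru so its aperture space can be
 * reused.
 */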
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

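/*
 * ttm_mem_io_reserve/free - refcounted reservation of driver io space
 *
 * The driver's io_mem_reserve callback is invoked when a region first
 * needs CPU access and io_mem_free once the last user is gone. A full
 * aperture (-EAGAIN) triggers eviction of idle reservations followed
 * by a retry.
 */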
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

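/*
 * ttm_mem_io_reserve_vm/free_vm - io reservation backing CPU mappings
 *
 * Pins an io space reservation on behalf of a virtual mapping of the
 * buffer and queues the buffer on the manager's io_reserve_lru, so
 * the reservation can be reclaimed (unmapping the buffer) when
 * aperture space runs out.
 */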
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

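/*
 * ttm_mem_reg_ioremap/iounmap - kernel mapping of a whole io region
 *
 * Reserves the region and, when it is iomem, maps it write-combined
 * or uncached according to the placement flags. *virtual stays NULL
 * for regions that are not iomem; those are reached through the TTM
 * page array instead.
 */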
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

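/*
 * Page copy helpers for ttm_bo_move_memcpy: io-to-io copies go 32
 * bits at a time through ioread32()/iowrite32(); copies between io
 * space and a TTM map the TTM page with a protection matching its
 * caching attributes (kmap_atomic_prot() on x86, vmap()/kmap()
 * elsewhere) and use memcpy_fromio()/memcpy_toio().
 */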
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

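/*
 * ttm_bo_move_memcpy - fallback buffer move using the CPU
 *
 * Waits for the buffer to idle, maps the old and new regions and
 * copies page by page, walking backwards when source and destination
 * overlap within the same memory type. If the source holds no data
 * worth preserving (unpopulated, unswapped TTM), the destination is
 * simply cleared. The old region is released on success; on error the
 * mm node is kept so the caller can unwind.
 */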
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool interruptible,
		       bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->moving = NULL;
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

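/*
 * ttm_io_prot - derive the CPU page protection for a mapping from the
 * placement caching flags: cached placements pass through unchanged,
 * while write-combined and uncached placements get
 * pgprot_writecombine()/pgprot_noncached() where the architecture
 * supports them.
 */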
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

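/*
 * Kmap helpers: ttm_bo_ioremap maps (part of) an iomem buffer, either
 * through a premapped bus address or via ioremap_wc()/
 * ioremap_nocache(); ttm_bo_kmap_ttm maps page-backed buffers, using
 * kmap() for the single cached page case and vmap() otherwise.
 */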
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

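/*
 * ttm_bo_kmap/ttm_bo_kunmap - map a buffer object for CPU access
 *
 * Reserves io space if needed and maps @num_pages starting at
 * @start_page, choosing between ioremap and kmap/vmap depending on
 * where the buffer currently lives. A minimal usage sketch (error
 * handling trimmed; assumes "bo" is a reserved, populated buffer):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		void *ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		...read or write through ptr...
 *		ttm_bo_kunmap(&map);
 *	}
 */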
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

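/*
 * ttm_bo_move_accel_cleanup - finish up after an accelerated move
 *
 * Attaches @fence as the exclusive fence of the buffer. On eviction
 * the buffer is waited on synchronously and the old node freed; for
 * ordinary moves the old placement is handed over to a "ghost"
 * buffer object that is released once the fence signals, so moves can
 * be pipelined without stalling the CPU.
 */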
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct fence *fence,
			      bool evict,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		fence_put(bo->moving);
		bo->moving = fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);