/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

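/*
 * Release the memory manager node backing the buffer object's
 * current placement.
 */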
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

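/*
 * Move a buffer object by rebinding its TTM: when leaving a non-system
 * placement, wait for the GPU, unbind the TTM and free the old node;
 * then adjust the page caching state and bind to the new placement
 * unless it is system memory.
 */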
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool interruptible, bool no_wait_gpu,
		    struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);

		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				pr_err("Failed to expire sync object before unbinding TTM\n");
			return ret;
		}

		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

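/*
 * Serialize io space reservations for a memory type. Managers that
 * provide an io_reserve_fastpath skip the mutex entirely.
 */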
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

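/*
 * Make io space available by unmapping the first (oldest) buffer
 * object on the manager's io reserve LRU list.
 */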
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

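/*
 * Reserve io space for a memory region through the driver's
 * io_mem_reserve hook. Without a fastpath the reservation is
 * refcounted, and -EAGAIN from the driver triggers eviction of
 * LRU entries before retrying.
 */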
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

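/*
 * Drop an io space reservation and call the driver's io_mem_free hook
 * once the last reference is gone.
 */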
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

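/*
 * Reserve io space for CPU access through the VM system and, when the
 * manager uses an io reserve LRU, make the buffer object evictable by
 * adding it to that list.
 */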
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

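/*
 * Map a memory region for kernel access during a move: system memory
 * yields a NULL mapping, premapped io memory is reused, otherwise the
 * region is ioremapped write-combined or uncached depending on the
 * placement flags.
 */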
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

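/* Copy one page between two io mappings, 32 bits at a time. */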
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

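/*
 * Copy one page from an io mapping into a TTM page, mapping the
 * destination with the requested protection (kmap_atomic_prot on x86,
 * vmap or kmap elsewhere).
 */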
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

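/* The reverse direction: copy one TTM page into an io mapping. */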
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

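/*
 * Fallback move path using the CPU: wait for idle, map both regions,
 * then copy page by page. Moves within the same memory type copy
 * backwards when the regions overlap, and the destination is simply
 * cleared when there is no source data to preserve.
 */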
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool interruptible, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&bo->glob->bo_count);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	mutex_init(&fbo->wu_mutex);
	fbo->moving = NULL;
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

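/*
 * Translate TTM caching flags into a page protection value for CPU
 * mappings: cached placements are left untouched, write-combined and
 * uncached placements are adjusted per architecture.
 */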
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

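/*
 * Kernel-map part of an io memory buffer object, reusing the
 * premapped bus address when one exists.
 */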
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

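/*
 * Kernel-map pages of a system memory buffer object: a single cached
 * page goes through kmap, everything else through vmap with the
 * protection derived from the placement.
 */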
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

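/*
 * Generic kernel mapping entry point: reserve io space for the buffer
 * object and dispatch to ttm_bo_ioremap() for io memory or
 * ttm_bo_kmap_ttm() for system memory.
 */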
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

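/*
 * Finish an accelerated (GPU) move: attach the fence to the buffer's
 * reservation object, and either wait and free the old node on
 * eviction or hand the old memory to a ghost object that is released
 * once the fence signals.
 */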
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

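/*
 * Like ttm_bo_move_accel_cleanup(), but an eviction from fixed memory
 * does not stall: the fence is recorded in the source memory manager
 * and the old node is freed without waiting for the move to complete.
 */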
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

	int ret;

	reservation_object_add_excl_fence(bo->resv, fence);

	if (!evict) {
		struct ttm_buffer_object *ghost_obj;

		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);

	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

		/**
		 * BO doesn't have a TTM we need to bind/unbind. Just remember
		 * this eviction and free up the allocation.
		 */

		spin_lock(&from->move_lock);
		if (!from->move || dma_fence_is_later(fence, from->move)) {
			dma_fence_put(from->move);
			from->move = dma_fence_get(fence);
		}
		spin_unlock(&from->move_lock);

		ttm_bo_free_old_node(bo);

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

	} else {
		/**
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */

		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);