/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

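/*
 * ttm_bo_move_ttm - move a buffer using only CPU-side TTM bind/unbind.
 *
 * Releases the old backing node (flagging the bo as system memory in
 * the process), adjusts the TTM caching attributes to the new
 * placement, and binds the TTM to the new region unless the destination
 * is system memory.
 */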
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait_reserve,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

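/*
 * The io_reserve mutex serializes the driver's io_mem_reserve() and
 * io_mem_free() callbacks against eviction of I/O mappings. Drivers
 * that don't need this set io_reserve_fastpath, which turns the
 * lock/unlock pair into no-ops.
 */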
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

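/*
 * ttm_mem_io_reserve / ttm_mem_io_free refcount the driver's I/O
 * reservation for a memory region. If the driver returns -EAGAIN, the
 * reserve path evicts the least recently used I/O mapping from the
 * manager's LRU and retries.
 */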
static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

static void ttm_mem_io_free(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}

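/*
 * The *_vm variants below track whether a reservation is held on behalf
 * of a CPU (VM) mapping via io_reserved_vm, and keep the object on the
 * manager's io_reserve LRU so ttm_mem_io_evict() can find and unmap it.
 */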
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

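/*
 * Map a whole memory region for CPU access. On success, *virtual holds
 * the kernel virtual address, or NULL for non-iomem regions, in which
 * case the caller is expected to go through the TTM pages instead.
 */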
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset,
					  mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset,
					       mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

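/*
 * Copy a single page between two iomapped regions, 32 bits at a time,
 * using the io accessors so the copy is safe for real I/O memory.
 */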
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

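/*
 * The two helpers below copy a single page between an iomap and a TTM
 * page, mapping the TTM page with the caching attributes of the bo.
 * On x86, kmap_atomic_prot() can do that directly; elsewhere, vmap() is
 * used whenever a non-default protection is needed.
 */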
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

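/*
 * ttm_bo_move_memcpy - fallback move path using the CPU.
 *
 * Maps both the old and the new region (when they are iomem) and copies
 * page by page. When source and destination overlap within the same
 * memory type, the copy direction is reversed so data isn't clobbered.
 */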
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	/*
	 * TTM might be NULL for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node: the bo still owns its old backing.
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;

	*new_obj = fbo;
	return 0;
}

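/*
 * ttm_io_prot - adjust page protection bits for the requested caching.
 *
 * Translates TTM caching flags (write-combined, uncached, cached) into
 * the corresponding architecture-specific pgprot_t modifications.
 */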
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base +
						  bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base +
						       bo->mem.bus.offset +
						       offset, size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

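/*
 * ttm_bo_kmap - map part or all of a buffer object into kernel space.
 *
 * A rough usage sketch (error paths trimmed; assumes the caller holds a
 * reservation on the bo and uses the ttm_kmap_obj_virtual() accessor
 * from ttm_bo_api.h):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virt;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret)
 *		return ret;
 *	virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	... access virt, using memcpy_toio()/memcpy_fromio() if is_iomem ...
 *	ttm_bo_kunmap(&map);
 *
 * Depending on the placement, the mapping ends up backed by kmap, vmap,
 * ioremap or a driver-premapped address; ttm_bo_kunmap() undoes
 * whichever variant was chosen.
 */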
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

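/*
 * ttm_bo_move_accel_cleanup - finish an accelerated (GPU) move.
 *
 * Attaches the driver's fence to the bo. On eviction the move is waited
 * on synchronously; otherwise the old backing store is handed to a
 * throw-away "ghost" object that is released once the fence signals,
 * so the copy can be pipelined.
 */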
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait_reserve,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/*
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/*
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);