/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

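/*
 * Release the memory node backing the buffer object's current placement,
 * returning it to the relevant memory type manager via ttm_bo_mem_put().
 */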
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

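/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: true if this move is an eviction.
 * @no_wait_reserve: Fail instead of waiting for other buffers (unused here).
 * @no_wait_gpu: Fail instead of waiting for the GPU (unused here).
 * @new_mem: The struct ttm_mem_reg describing the new placement.
 *
 * Fallback move path for buffers backed by a TTM: unbind and free any old
 * non-system placement, adjust the caching state of the TTM pages to match
 * @new_mem, and bind the pages to the new placement unless it is system
 * memory. On success @bo->mem takes over @new_mem and @new_mem->mm_node is
 * cleared.
 * Returns:
 * !0: Failure.
 */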
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait_reserve,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

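/*
 * Reserve the bus resource (typically an I/O aperture range) backing @mem.
 * The bus.io_reserved flag makes the call idempotent: the driver's
 * io_mem_reserve hook runs at most once until ttm_mem_io_free() clears it.
 */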
int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	int ret;

	if (!mem->bus.io_reserved) {
		mem->bus.io_reserved = true;
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

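/*
 * Undo ttm_mem_io_reserve(): if the driver implements the io_mem hooks and
 * @mem is currently reserved, drop the reservation via io_mem_free.
 */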
void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	if (bdev->driver->io_mem_reserve) {
		if (mem->bus.io_reserved) {
			mem->bus.io_reserved = false;
			bdev->driver->io_mem_free(bdev, mem);
		}
	}
}

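/*
 * Map a memory region into kernel address space for a CPU copy.
 * *virtual is left NULL for non-iomem regions (the caller then uses the
 * TTM pages instead); pre-mapped regions reuse the driver-provided
 * bus.addr; otherwise the range is ioremapped write-combined or uncached
 * according to the placement flags.
 */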
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			ttm_mem_io_free(bdev, mem);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

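/*
 * Tear down a mapping created by ttm_mem_reg_ioremap(). Only mappings that
 * were ioremapped here (bus.addr == NULL) are iounmapped; the bus
 * reservation is dropped in either case.
 */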
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	ttm_mem_io_free(bdev, mem);
}

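/*
 * Copy one page between two kernel mappings of I/O memory, using
 * ioread32()/iowrite32() so the access width is well-defined for device
 * memory.
 */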
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

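/*
 * Copy one page from an ioremapped source into a TTM page, mapping the
 * destination with the caller-supplied page protection: kmap_atomic_prot()
 * on x86, vmap() or kmap() elsewhere depending on whether a non-default
 * protection is required.
 */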
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

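/*
 * Mirror image of ttm_copy_io_ttm_page(): copy one TTM page out to an
 * ioremapped destination, mapping the source page with the requested
 * protection.
 */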
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

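/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: true if this move is an eviction.
 * @no_wait_reserve: Fail instead of waiting for other buffers (unused here).
 * @no_wait_gpu: Fail instead of waiting for the GPU (unused here).
 * @new_mem: The struct ttm_mem_reg describing the new placement.
 *
 * Fallback move done with the CPU: map both placements and copy page by
 * page, walking backwards when the source and destination ranges overlap
 * within the same memory type. For moves into fixed memory
 * (TTM_MEMTYPE_FLAG_FIXED) the now-unused TTM is unbound and destroyed.
 * Returns:
 * !0: Failure.
 */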
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	ttm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}

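/**
 * ttm_io_prot
 *
 * @caching_flags: The TTM_PL_FLAG_* caching flags of the placement.
 * @tmp: The base page protection to modify.
 *
 * Translate TTM caching flags into a pgprot_t for CPU mappings, applying
 * architecture-specific write-combine or uncached page attributes where the
 * platform distinguishes them. (The boot_cpu_data.x86 > 3 guard apparently
 * skips early x86 CPUs without usable cache-disable PTE bits.)
 */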
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

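/*
 * kmap helper for iomem placements: reuse the driver's pre-mapped bus
 * address when available, otherwise ioremap the requested sub-range,
 * honouring the write-combine placement flag.
 */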
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

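/*
 * kmap helper for TTM (system page) backed placements. A single cached
 * page is kmapped directly; anything else is populated first and then
 * vmapped so the mapping is virtually contiguous and carries the desired
 * page protection.
 */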
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	struct page *d;
	int i;

	BUG_ON(!ttm);
	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping.
		 */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

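/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: Returns the mapping state.
 *
 * Set up a kernel virtual mapping covering part of a buffer object,
 * dispatching to ttm_bo_ioremap() or ttm_bo_kmap_ttm() depending on whether
 * the current placement is I/O memory. Undone with ttm_bo_kunmap().
 * Returns:
 * !0: Failure.
 */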
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

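/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the mapping to unmap.
 *
 * Unmap a kernel mapping set up by ttm_bo_kmap(), using the unmap primitive
 * that matches how the mapping was created.
 */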
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

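/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when the move is complete.
 * @sync_obj_arg: An argument to pass to the sync object idle / wait
 * functions.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Cleanup helper to be called after an accelerated move has been
 * scheduled. For evictions, wait for the buffer to idle and free the old
 * node directly. Otherwise hang the old placement and its fence on a
 * ghost buffer object (see ttm_buffer_object_transfer()) so it is
 * released once the GPU operation completes, which helps pipeline
 * buffer moves.
 * Returns:
 * !0: Failure.
 */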
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait_reserve,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		ttm_bo_free_old_node(bo);
		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);