/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

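/**
 * ttm_bo_free_old_node - Release the drm_mm node of a bo's current placement.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * The node is returned to the memory manager under the global LRU lock,
 * which also protects the drm_mm, and @bo's mm_node pointer is cleared.
 */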
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (old_mem->mm_node) {
		spin_lock(&bo->glob->lru_lock);
		drm_mm_put_block(old_mem->mm_node);
		spin_unlock(&bo->glob->lru_lock);
	}
	old_mem->mm_node = NULL;
}

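/**
 * ttm_bo_move_ttm - Move a buffer object by (re)binding its TTM.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: Whether this move is an eviction. Unused here.
 * @no_wait: Never sleep. Unused here.
 * @new_mem: The new memory region.
 *
 * Unbinds the TTM from a non-system old placement, adjusts the page
 * caching mode to match @new_mem, and rebinds the TTM unless the new
 * placement is system memory. On success @bo->mem takes over @new_mem,
 * including its mm_node.
 */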
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

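/**
 * ttm_mem_reg_ioremap - Map a memory region into kernel address space.
 *
 * @bdev: The buffer object device.
 * @mem: The memory region to map.
 * @virtual: Receives the kernel virtual address, or NULL if @mem has no
 * I/O space behind it (bus_size == 0).
 *
 * Premapped memory types are offset into the existing io_addr mapping;
 * everything else is ioremapped write-combined or uncached according to
 * the placement flags. Returns -ENOMEM if the ioremap fails.
 */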
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
	if (ret || bus_size == 0)
		return ret;

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
	else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(bus_base + bus_offset, bus_size);
		else
			addr = ioremap_nocache(bus_base + bus_offset, bus_size);
		if (!addr)
			return -ENOMEM;
	}
	*virtual = addr;
	return 0;
}

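/**
 * ttm_mem_reg_iounmap - Undo a mapping made by ttm_mem_reg_ioremap.
 *
 * @bdev: The buffer object device.
 * @mem: The memory region that was mapped.
 * @virtual: The kernel virtual address returned by ttm_mem_reg_ioremap.
 *
 * Only memory types that actually needed an ioremap are iounmapped;
 * premapped regions are left untouched.
 */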
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
		iounmap(virtual);
}

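/*
 * Copy a single page between two I/O mappings, 32 bits at a time.
 */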
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

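/*
 * Copy one page from an I/O mapping into a TTM page. The destination
 * page is mapped with the requested protection: via an atomic kmap on
 * x86, via vmap() elsewhere when a non-default protection is needed.
 */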
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

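/*
 * Copy one TTM page out to an I/O mapping. The source page is mapped
 * with the requested protection, as above.
 */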
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

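/**
 * ttm_bo_move_memcpy - Fallback move using a CPU copy.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: Whether this move is an eviction. Unused here.
 * @no_wait: Never sleep. Unused here.
 * @new_mem: The new memory region.
 *
 * Maps the old and the new placement and copies page by page, walking
 * backwards when the two ranges overlap within the same memory type.
 * On success the old node is freed and, when moving to a fixed memory
 * type, the now unneeded TTM is unbound and destroyed.
 */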
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	ttm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

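/*
 * Destroy callback for the ghost objects created by
 * ttm_buffer_object_transfer() below.
 */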
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer - Create a ghost object for an accelerated move.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * 0: Success.
 * -ENOMEM: Failure to allocate the placeholder object.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	spin_lock_init(&fbo->lock);
	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	fbo->vm_node = NULL;

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	if (fbo->mem.mm_node)
		fbo->mem.mm_node->private = (void *)fbo;
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}

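/**
 * ttm_io_prot - Derive a page protection from TTM caching flags.
 *
 * @caching_flags: TTM_PL_FLAG_* caching flags of the placement.
 * @tmp: The base page protection to modify.
 *
 * Returns @tmp made write-combined or uncached, as the architecture
 * allows, whenever the placement is not cached.
 */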
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

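/*
 * Set up the I/O mapping for a kmap of a bo placed in I/O space, either
 * by offsetting into the premapped region or by a fresh ioremap, and
 * record in @map how to undo it.
 */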
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long bus_base,
			  unsigned long bus_offset,
			  unsigned long bus_size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bus_base + bus_offset,
						  bus_size);
		else
			map->virtual = ioremap_nocache(bus_base + bus_offset,
						       bus_size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

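/*
 * Kmap a bo backed by system pages. A single cached page is kmapped
 * directly; any other request goes through vmap() to get a virtually
 * contiguous mapping with the right page protection.
 */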
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	struct page *d;
	int i;

	BUG_ON(!ttm);
	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping.
		 */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

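/**
 * ttm_bo_kmap - Map part of a buffer object into kernel address space.
 *
 * @bo: The buffer object.
 * @start_page: First page of the range to map.
 * @num_pages: Number of pages to map.
 * @map: Mapping state, to be passed to ttm_bo_kunmap().
 *
 * Chooses an ioremap- or a page-based mapping depending on whether the
 * bo's current placement lives in I/O space. Returns -EINVAL for an
 * out-of-range request, -ENOMEM if the mapping itself fails.
 */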
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	int ret;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
				&bus_offset, &bus_size);
	if (ret)
		return ret;
	if (bus_size == 0) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		bus_offset += start_page << PAGE_SHIFT;
		bus_size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
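
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): map a whole bo, clear its contents, unmap.
 *
 *	struct ttm_bo_kmap_obj map;
 *	int ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret == 0) {
 *		memset(map.virtual, 0, bo->num_pages << PAGE_SHIFT);
 *		ttm_bo_kunmap(&map);
 *	}
 *
 * Note that map.virtual may point at write-combined or uncached I/O
 * memory depending on the bo's placement.
 */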
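/**
 * ttm_bo_kunmap - Tear down a mapping made by ttm_bo_kmap.
 *
 * @map: The mapping state filled in by ttm_bo_kmap().
 *
 * Dispatches on the recorded mapping type: iounmap, vunmap, kunmap, or
 * nothing at all for premapped regions.
 */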
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

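/**
 * ttm_bo_pfn_prot - Look up the pfn and page protection for a bo offset.
 *
 * @bo: The buffer object.
 * @dst_offset: Byte offset into the bo.
 * @pfn: Receives the page frame number backing @dst_offset.
 * @prot: Receives the page protection matching the bo's placement.
 *
 * Returns -EINVAL if the offset is backed by neither I/O space nor a
 * TTM.
 */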
int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
		    unsigned long dst_offset,
		    unsigned long *pfn, pgprot_t *prot)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;
	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
				&bus_size);
	if (ret)
		return -EINVAL;
	if (bus_size != 0)
		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
	else
		if (!bo->ttm)
			return -EINVAL;
		else
			*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
							   dst_offset >>
							   PAGE_SHIFT));
	*prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);

	return 0;
}

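/**
 * ttm_bo_move_accel_cleanup - Finish up after an accelerated move.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when the move is complete.
 * @sync_obj_arg: An argument to pass to the sync object functions.
 * @evict: This move is an eviction; wait for the bo to become idle
 * before releasing the old placement.
 * @no_wait: Never sleep. Unused here.
 * @new_mem: The new memory region.
 *
 * Attaches @sync_obj to the bo so the old placement is released only
 * when the copy has finished. For ordinary (non-evicting) moves the old
 * data is instead hung on a ghost buffer object, created by
 * ttm_buffer_object_transfer(), and released when that object is idle.
 */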
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bo->lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		ttm_bo_free_old_node(bo);
		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);