drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

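/*
 * Placement definitions describing which TTM memory types (system, VRAM,
 * GMR and MOB) buffer objects may be placed in, and which placement to
 * fall back to when the preferred one cannot be satisfied.
 */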
static const struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = 0
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = 0
};

static const struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_GMR,
	.flags = 0
};

static const struct ttm_place mob_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_MOB,
	.flags = 0
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}
};

static const struct ttm_place gmr_vram_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = 0
	}
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

static const struct ttm_place evictable_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = 0
	}
};

static const struct ttm_place nonfixed_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = 0
	}
};

struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct vmw_ttm_tt {
	struct ttm_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
	bool bound;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * __vmw_piter_non_sg_next: Helper functions to advance
 * a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}


/**
 * __vmw_piter_non_sg_page: Helper functions to return a pointer
 * to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

/**
 * __vmw_piter_phys_addr: Helper functions to return the DMA
 * address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Pointer offset used to update current array position
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->page = &__vmw_piter_non_sg_page;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->drm.dev;

	dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->drm.dev;

	return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping functions and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;
	struct scatterlist *sg;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
				vsgt->num_pages, 0,
				(unsigned long) vsgt->num_pages << PAGE_SHIFT,
				dma_get_max_seg_size(dev_priv->drm.dev),
				NULL, 0, GFP_KERNEL);
		if (IS_ERR(sg)) {
			ret = PTR_ERR(sg);
			goto out_sg_alloc_fail;
		}

		if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.orig_nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);

	return &vmw_tt->vsgt;
}


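/**
 * vmw_ttm_bind - Bind a TTM object's backing pages to the device
 *
 * @bdev: Pointer to the struct ttm_device.
 * @ttm: Pointer to the struct ttm_tt to bind.
 * @bo_mem: The memory resource the pages are bound to.
 *
 * Sets up the DMA mappings if needed and binds the pages either as a GMR
 * or as a MOB, depending on the resource's memory type. A no-op if the
 * object is already bound.
 */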
static int vmw_ttm_bind(struct ttm_device *bdev,
			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
	int ret = 0;

	if (!bo_mem)
		return -EINVAL;

	if (vmw_be->bound)
		return 0;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				   ttm->num_pages, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				   &vmw_be->vsgt, ttm->num_pages,
				   vmw_be->gmr_id);
		break;
	default:
		BUG();
	}
	vmw_be->bound = true;
	return ret;
}

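/**
 * vmw_ttm_unbind - Unbind a TTM object's backing pages from the device
 *
 * @bdev: Pointer to the struct ttm_device.
 * @ttm: Pointer to the struct ttm_tt to unbind.
 *
 * Releases the GMR or MOB binding and, in the vmw_dma_map_bind mode, also
 * tears down the DMA mappings. A no-op if the object is not bound.
 */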
static void vmw_ttm_unbind(struct ttm_device *bdev,
			   struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	if (!vmw_be->bound)
		return;

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);
	vmw_be->bound = false;
}


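/**
 * vmw_ttm_destroy - Free a struct vmw_ttm_tt
 *
 * @bdev: Pointer to the struct ttm_device.
 * @ttm: Pointer to the struct ttm_tt to destroy.
 *
 * Unbinds the pages, tears down any DMA mappings, destroys the backing
 * MOB if one was created, and frees the structure itself.
 */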
static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	vmw_ttm_unbind(bdev, ttm);
	ttm_tt_destroy_common(bdev, ttm);
	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}


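/**
 * vmw_ttm_populate - Allocate backing pages for a struct ttm_tt
 *
 * @bdev: Pointer to the struct ttm_device.
 * @ttm: Pointer to the struct ttm_tt to populate.
 * @ctx: TTM operation context.
 *
 * Allocates pages from the TTM page pool and accounts them with the TTM
 * memory global. On failure the accounting and the pages are released again.
 */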
static int vmw_ttm_populate(struct ttm_device *bdev,
			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	unsigned int i;
	int ret;

	/* TODO: maybe completely drop this ? */
	if (ttm_tt_is_populated(ttm))
		return 0;

	ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		return ret;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i],
						PAGE_SIZE, ctx);
		if (ret)
			goto error;
	}
	return 0;

error:
	while (i--)
		ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
					 PAGE_SIZE);
	ttm_pool_free(&bdev->pool, ttm);
	return ret;
}

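/**
 * vmw_ttm_unpopulate - Release the backing pages of a struct ttm_tt
 *
 * @bdev: Pointer to the struct ttm_device.
 * @ttm: Pointer to the struct ttm_tt to unpopulate.
 *
 * Destroys any associated MOB, tears down the DMA mappings, releases the
 * memory accounting and returns the pages to the TTM page pool.
 */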
static void vmw_ttm_unpopulate(struct ttm_device *bdev,
			       struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm);
	unsigned int i;

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);

	for (i = 0; i < ttm->num_pages; ++i)
		ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
					 PAGE_SIZE);

	ttm_pool_free(&bdev->pool, ttm);
}

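/**
 * vmw_ttm_tt_create - Create a struct vmw_ttm_tt for a buffer object
 *
 * @bo: Pointer to the struct ttm_buffer_object.
 * @page_flags: TTM page flags.
 *
 * Allocates and initializes the driver-private TTM backend. In the coherent
 * DMA mapping mode the backend is initialized with ttm_sg_tt_init(),
 * otherwise with ttm_tt_init(). Returns NULL on failure.
 */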
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				     ttm_cached);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				  ttm_cached);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

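/**
 * vmw_evict_flags - Return the placement to use when evicting a buffer object
 *
 * @bo: The buffer object being evicted.
 * @placement: Returns the eviction placement, which is always system memory.
 */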
static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

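/**
 * vmw_ttm_io_mem_reserve - Set up I/O memory info for a resource
 *
 * @bdev: Pointer to the struct ttm_device.
 * @mem: The resource to set up.
 *
 * For VRAM resources, fills in the bus offset and caching information
 * needed to map the memory. System, GMR and MOB resources need no I/O
 * memory setup.
 */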
static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) +
			dev_priv->vram_start;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_cached;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The old memory where we move from
 * @new_mem: The struct ttm_resource indicating to what memory
 * region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    struct ttm_resource *old_mem,
			    struct ttm_resource *new_mem)
{
	vmw_bo_move_notify(bo, new_mem);
	vmw_query_move_notify(bo, old_mem, new_mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}

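/**
 * vmw_move - TTM move callback
 *
 * @bo: The buffer object to move.
 * @evict: Whether the move is an eviction.
 * @ctx: TTM operation context.
 * @new_mem: The resource to move to.
 * @hop: Placement for a multihop move (unused by this driver).
 *
 * Binds the buffer to the new resource if it is GMR or MOB backed, notifies
 * resources of the move, and either completes the move without copying
 * (for moves between TT-backed resource types) or falls back to a memcpy
 * move. On failure the move notification is reverted.
 */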
static int vmw_move(struct ttm_buffer_object *bo,
		    bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem,
		    struct ttm_place *hop)
{
	struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
	int ret;

	if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
		if (ret)
			return ret;
	}

	vmw_move_notify(bo, bo->resource, new_mem);

	if (old_man->use_tt && new_man->use_tt) {
		if (bo->resource->mem_type == TTM_PL_SYSTEM) {
			ttm_bo_move_null(bo, new_mem);
			return 0;
		}
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			goto fail;

		vmw_ttm_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_mem);
		return 0;
	} else {
		ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	vmw_move_notify(bo, new_mem, bo->resource);
	return ret;
}

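/*
 * TTM device callbacks implemented by the vmwgfx driver.
 */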
struct ttm_device_funcs vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.ttm_tt_destroy = &vmw_ttm_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = vmw_move,
	.swap_notify = vmw_swap_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};

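/**
 * vmw_bo_create_and_populate - Create and populate a kernel buffer object
 *
 * @dev_priv: Pointer to the device private structure.
 * @bo_size: Size of the buffer object in bytes.
 * @bo_p: Returns the created buffer object on success.
 *
 * The buffer object is created with system placement, then populated and
 * DMA-mapped while reserved. Returns 0 on success or a negative error code
 * on failure.
 */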
int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       unsigned long bo_size,
			       struct ttm_buffer_object **bo_p)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_buffer_object *bo;
	int ret;

	ret = vmw_bo_create_kernel(dev_priv, bo_size,
				   &vmw_sys_placement,
				   &bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
	if (likely(ret == 0)) {
		struct vmw_ttm_tt *vmw_tt =
			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
		ret = vmw_ttm_map_dma(vmw_tt);
	}

	ttm_bo_unreserve(bo);

	if (likely(ret == 0))
		*bo_p = bo;
	return ret;
}