/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
				    struct ttm_mem_reg *mem)
{
	if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
		return 0;

	return ((mem->start << PAGE_SHIFT) + mem->size) >
		adev->mc.visible_vram_size ?
		adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
		mem->size;
}

static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
				       struct ttm_mem_reg *old_mem,
				       struct ttm_mem_reg *new_mem)
{
	u64 vis_size;
	if (!adev)
		return;

	if (new_mem) {
		switch (new_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_add(new_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_add(new_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
			atomic64_add(vis_size, &adev->vram_vis_usage);
			break;
		}
	}

	if (old_mem) {
		switch (old_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_sub(old_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_sub(old_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
			atomic64_sub(vis_size, &adev->vram_vis_usage);
			break;
		}
	}
}

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

	amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);

	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	kfree(bo->metadata);
	kfree(bo);
}

bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_ttm_bo_destroy)
		return true;
	return false;
}

static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
				      struct ttm_placement *placement,
				      struct ttm_place *places,
				      u32 domain, u64 flags)
{
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}

void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);

	amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
				  domain, abo->flags);
}
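
/*
 * Illustrative sketch (not called from anywhere in this file): a BO whose
 * requested domains are AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT gets
 * two placements, VRAM first and GTT second, so TTM tries VRAM and falls
 * back to GTT under memory pressure. "abo" stands for any amdgpu_bo:
 *
 *	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 *	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 *					      AMDGPU_GEM_DOMAIN_GTT);
 *	// abo->placement.num_placement is now 2; the VRAM place is limited
 *	// to the CPU-visible aperture because CPU_ACCESS_REQUIRED is set.
 */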

static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
					struct ttm_placement *placement)
{
	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));

	memcpy(bo->placements, placement->placement,
	       placement->num_placement * sizeof(struct ttm_place));
	bo->placement.num_placement = placement->num_placement;
	bo->placement.num_busy_placement = placement->num_busy_placement;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: resulting BO
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create(adev, size, align, true, domain,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, bo_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unreserve;
		}
	}

	amdgpu_bo_unreserve(*bo_ptr);

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	amdgpu_bo_unref(bo_ptr);

	return r;
}
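
/*
 * Typical usage (illustrative sketch only; "adev" stands for the driver's
 * struct amdgpu_device and is not defined here). A caller allocates a
 * page-sized, pinned, CPU-mapped BO, uses it, and later releases it with
 * amdgpu_bo_free_kernel() below:
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	// ... program the hardware with gpu_addr, fill cpu_ptr ...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */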

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to the BO's GPU address, cleared to 0 (may be NULL)
 * @cpu_addr: pointer to the BO's CPU mapping, cleared to NULL (may be NULL)
 *
 * Unmaps, unpins and frees a BO that was allocated for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, false) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct reservation_object *resv,
				struct amdgpu_bo **bo_ptr)
{
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align;
	u64 initial_bytes_moved;
	size_t acc_size;
	int r;

	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	INIT_LIST_HEAD(&bo->shadow_list);
	INIT_LIST_HEAD(&bo->va);
	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT |
					 AMDGPU_GEM_DOMAIN_CPU |
					 AMDGPU_GEM_DOMAIN_GDS |
					 AMDGPU_GEM_DOMAIN_GWS |
					 AMDGPU_GEM_DOMAIN_OA);
	bo->allowed_domains = bo->prefered_domains;
	if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = flags;

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif

	amdgpu_fill_placement_to_bo(bo, placement);
	/* Kernel allocations are uninterruptible */

	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
				 &bo->placement, page_align, !kernel, NULL,
				 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
	amdgpu_cs_report_moved_bytes(adev,
		atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved);

	if (unlikely(r != 0))
		return r;

	if (kernel)
		bo->tbo.priority = 1;

	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	return 0;

fail_unreserve:
	if (!resv)
		ww_mutex_unlock(&bo->tbo.resv->lock);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size, int byte_align,
				   struct amdgpu_bo *bo)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	int r;

	if (bo->shadow)
		return 0;

	bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, AMDGPU_GEM_DOMAIN_GTT,
				  AMDGPU_GEM_CREATE_CPU_GTT_USWC);

	r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
					AMDGPU_GEM_DOMAIN_GTT,
					AMDGPU_GEM_CREATE_CPU_GTT_USWC,
					NULL, &placement,
					bo->tbo.resv,
					&bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	int r;

	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, domain, flags);

	r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
					domain, flags, sg, &placement,
					resv, bo_ptr);
	if (r)
		return r;

	if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
		if (!resv) {
			r = ww_mutex_lock(&(*bo_ptr)->tbo.resv->lock, NULL);
			WARN_ON(r != 0);
		}

		r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));

		if (!resv)
			ww_mutex_unlock(&(*bo_ptr)->tbo.resv->lock);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}

int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence,
			       bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	bool is_iomem;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;

		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;

	return 0;
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}
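
/*
 * Example (illustrative sketch; "bo" is assumed to be an already created,
 * CPU-accessible GTT BO). The BO is reserved around the mapping so it cannot
 * move while the kernel pointer is in use:
 *
 *	void *ptr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r) {
 *		memset(ptr, 0, amdgpu_bo_size(bo));
 *		amdgpu_bo_kunmap(bo);
 *	}
 *	amdgpu_bo_unreserve(bo);
 */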

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r, i;
	unsigned fpfn, lpfn;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
		return -EINVAL;

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (domain != amdgpu_mem_type_to_domain(mem_type))
			return -EINVAL;

		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset >
		     adev->mc.visible_vram_size)) {
			if (WARN_ON_ONCE(min_offset >
					 adev->mc.visible_vram_size))
				return -EINVAL;
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
		} else {
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = max_offset >> PAGE_SHIFT;
		}
		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}
	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;
	if (gpu_addr != NULL)
		*gpu_addr = amdgpu_bo_gpu_offset(bo);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		adev->vram_pin_size += amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			adev->invisible_pin_size += amdgpu_bo_size(bo);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		adev->gart_pin_size += amdgpu_bo_size(bo);
	}

error:
	return r;
}

int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r, i;

	if (!bo->pin_count) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
		goto error;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		adev->vram_pin_size -= amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			adev->invisible_pin_size -= amdgpu_bo_size(bo);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		adev->gart_pin_size -= amdgpu_bo_size(bo);
	}

error:
	return r;
}
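
/*
 * Example (illustrative sketch; "bo" is assumed to be provided by the
 * caller). Pinning and unpinning must be done with the BO reserved, and
 * every successful amdgpu_bo_pin() is balanced by an amdgpu_bo_unpin():
 *
 *	u64 gpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, &gpu_addr);
 *	amdgpu_bo_unreserve(bo);
 *	if (r)
 *		return r;
 *	// ... the BO now stays at gpu_addr until it is unpinned ...
 *	if (amdgpu_bo_reserve(bo, false) == 0) {
 *		amdgpu_bo_unpin(bo);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */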

int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3"
};

int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->mc.aper_base,
				   adev->mc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->mc.mc_vram_size >> 20,
		 (unsigned long long)adev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
	return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->mc.vram_mtrr);
	arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	amdgpu_vm_bo_invalidate(adev, abo);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	amdgpu_update_memory_usage(adev, &bo->mem, new_mem);

	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	/* TODO: figure out how to map scattered VRAM to the CPU */
	if ((offset + size) <= adev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* the memory is not CPU visible, so try to move it into visible VRAM */
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
	lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < abo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!abo->placements[i].lpfn ||
		     abo->placements[i].lpfn > lpfn))
			abo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &abo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &abo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > adev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}
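
/*
 * Example (illustrative sketch; "ring" and "dst_addr" are assumed to be set
 * up by the caller, and bo->tbo.resv is assumed to be reserved). A copy that
 * reads from the BO is attached as a shared fence so the BO cannot be moved
 * or freed before the copy completes; an operation that writes the BO would
 * instead pass shared == false to install the exclusive fence:
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_copy_buffer(ring, amdgpu_bo_gpu_offset(bo), dst_addr,
 *			       amdgpu_bo_size(bo), bo->tbo.resv, &fence,
 *			       false);
 *	if (!r) {
 *		amdgpu_bo_fence(bo, fence, true);
 *		dma_fence_put(fence);
 *	}
 */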

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns current GPU offset of the object.
 *
 * Note: object should either be pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
		     !amdgpu_ttm_is_bound(bo->tbo.ttm));
	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return bo->tbo.offset;
}