/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <drm/drm_cache.h>
#include "radeon.h"
#include "radeon_trace.h"

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

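/* Track per-domain memory usage: @sign is +1 when the BO enters the given
 * memory type and -1 when it leaves it.
 */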
static void radeon_update_memory_usage(struct radeon_bo *bo,
                                       unsigned mem_type, int sign)
{
        struct radeon_device *rdev = bo->rdev;
        u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

        switch (mem_type) {
        case TTM_PL_TT:
                if (sign > 0)
                        atomic64_add(size, &rdev->gtt_usage);
                else
                        atomic64_sub(size, &rdev->gtt_usage);
                break;
        case TTM_PL_VRAM:
                if (sign > 0)
                        atomic64_add(size, &rdev->vram_usage);
                else
                        atomic64_sub(size, &rdev->vram_usage);
                break;
        }
}

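/* TTM destroy callback: drop the usage accounting, GEM state and surface
 * register before freeing the radeon_bo itself.
 */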
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct radeon_bo *bo;

        bo = container_of(tbo, struct radeon_bo, tbo);

        radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

        mutex_lock(&bo->rdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&bo->rdev->gem.mutex);
        radeon_bo_clear_surface_reg(bo);
        WARN_ON_ONCE(!list_empty(&bo->va));
        if (bo->gem_base.import_attach)
                drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
        drm_gem_object_release(&bo->gem_base);
        kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &radeon_ttm_bo_destroy)
                return true;
        return false;
}

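/* Build the TTM placement list for @domain. VRAM placements come first so
 * they are preferred, and BOs flagged RADEON_GEM_NO_CPU_ACCESS are placed
 * outside the CPU-visible part of VRAM when possible.
 */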
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
        u32 c = 0, i;

        rbo->placement.placement = rbo->placements;
        rbo->placement.busy_placement = rbo->placements;
        if (domain & RADEON_GEM_DOMAIN_VRAM) {
                /* Try placing BOs which don't need CPU access outside of the
                 * CPU accessible part of VRAM
                 */
                if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
                    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
                        rbo->placements[c].fpfn =
                                rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                        rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                                     TTM_PL_FLAG_UNCACHED |
                                                     TTM_PL_FLAG_VRAM;
                }

                rbo->placements[c].fpfn = 0;
                rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                             TTM_PL_FLAG_UNCACHED |
                                             TTM_PL_FLAG_VRAM;
        }

        if (domain & RADEON_GEM_DOMAIN_GTT) {
                if (rbo->flags & RADEON_GEM_GTT_UC) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_TT;

                } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
                           (rbo->rdev->flags & RADEON_IS_AGP)) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_TT;
                } else {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
                                                     TTM_PL_FLAG_TT;
                }
        }

        if (domain & RADEON_GEM_DOMAIN_CPU) {
                if (rbo->flags & RADEON_GEM_GTT_UC) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_SYSTEM;

                } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
                           rbo->rdev->flags & RADEON_IS_AGP) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_SYSTEM;
                } else {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
                                                     TTM_PL_FLAG_SYSTEM;
                }
        }
        if (!c) {
                rbo->placements[c].fpfn = 0;
                rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
                                             TTM_PL_FLAG_SYSTEM;
        }

        rbo->placement.num_placement = c;
        rbo->placement.num_busy_placement = c;

        for (i = 0; i < c; ++i) {
                if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
                    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    !rbo->placements[i].fpfn)
                        rbo->placements[i].lpfn =
                                rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                else
                        rbo->placements[i].lpfn = 0;
        }
}

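/**
 * radeon_bo_create - allocate a radeon buffer object
 *
 * @rdev: radeon device
 * @size: requested size in bytes, rounded up to page granularity
 * @byte_align: required alignment in bytes
 * @kernel: true for kernel-internal allocations
 * @domain: initial RADEON_GEM_DOMAIN_* placement mask
 * @flags: RADEON_GEM_* caching/access flags, sanitized per chip below
 * @sg: scatter/gather table for imported dma-buf objects, or NULL
 * @resv: externally provided reservation object, or NULL
 * @bo_ptr: output pointer to the new radeon_bo
 *
 * Returns 0 on success or a negative error code.
 */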
int radeon_bo_create(struct radeon_device *rdev,
                     unsigned long size, int byte_align, bool kernel,
                     u32 domain, u32 flags, struct sg_table *sg,
                     struct reservation_object *resv,
                     struct radeon_bo **bo_ptr)
{
        struct radeon_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size_t acc_size;
        int r;

        size = ALIGN(size, PAGE_SIZE);

        if (kernel) {
                type = ttm_bo_type_kernel;
        } else if (sg) {
                type = ttm_bo_type_sg;
        } else {
                type = ttm_bo_type_device;
        }
        *bo_ptr = NULL;

        acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
                                       sizeof(struct radeon_bo));

        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->rdev = rdev;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
        INIT_LIST_HEAD(&bo->va);
        bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
                                       RADEON_GEM_DOMAIN_GTT |
                                       RADEON_GEM_DOMAIN_CPU);

        bo->flags = flags;
        /* PCI GART is always snooped */
        if (!(rdev->flags & RADEON_IS_PCIE))
                bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

        /* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
         * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
         */
        if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
                bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
        /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
         * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
         */
        bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
        /* Don't try to enable write-combining when it can't work, or things
         * may be slow
         * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
         */

#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
         thanks to write-combining

        if (bo->flags & RADEON_GEM_GTT_WC)
                DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
                              "better performance thanks to write-combining\n");
        bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#else
        /* For architectures that don't support WC memory,
         * mask out the WC flag from the BO
         */
        if (!drm_arch_can_wc_memory())
                bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocations are uninterruptible */
        down_read(&rdev->pm.mclk_lock);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, page_align, !kernel, NULL,
                        acc_size, sg, resv, &radeon_ttm_bo_destroy);
        up_read(&rdev->pm.mclk_lock);
        if (unlikely(r != 0)) {
                return r;
        }
        *bo_ptr = bo;

        trace_radeon_bo_create(bo);

        return 0;
}

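/* Map the whole BO into the kernel address space and cache the pointer in
 * bo->kptr, so repeated calls just hand back the existing mapping.
 */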
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
        bool is_iomem;
        int r;

        if (bo->kptr) {
                if (ptr) {
                        *ptr = bo->kptr;
                }
                return 0;
        }
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r) {
                return r;
        }
        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr) {
                *ptr = bo->kptr;
        }
        radeon_bo_check_tiling(bo, 0, 0);
        return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        radeon_bo_check_tiling(bo, 0, 0);
        ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
        if (bo == NULL)
                return NULL;

        ttm_bo_reference(&bo->tbo);
        return bo;
}

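/* Drop a reference; once the last reference goes away the TTM destroy
 * callback frees the BO, and *bo is cleared for the caller.
 */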
void radeon_bo_unref(struct radeon_bo **bo)
{
        struct ttm_buffer_object *tbo;
        struct radeon_device *rdev;

        if ((*bo) == NULL)
                return;
        rdev = (*bo)->rdev;
        tbo = &((*bo)->tbo);
        ttm_bo_unref(&tbo);
        if (tbo == NULL)
                *bo = NULL;
}

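/**
 * radeon_bo_pin_restricted - pin a BO into @domain below @max_offset
 *
 * @bo: buffer object to pin
 * @domain: RADEON_GEM_DOMAIN_* to pin into
 * @max_offset: highest acceptable GPU offset, or 0 for no restriction
 * @gpu_addr: optional output for the resulting GPU address
 *
 * Pinning is refcounted; an already pinned BO just gets its pin_count
 * bumped. Userptr BOs cannot be pinned.
 */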
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
                             u64 *gpu_addr)
{
        struct ttm_operation_ctx ctx = { false, false };
        int r, i;

        if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
                return -EPERM;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = radeon_bo_gpu_offset(bo);

                if (max_offset != 0) {
                        u64 domain_start;

                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain_start = bo->rdev->mc.vram_start;
                        else
                                domain_start = bo->rdev->mc.gtt_start;
                        WARN_ON_ONCE(max_offset <
                                     (radeon_bo_gpu_offset(bo) - domain_start));
                }

                return 0;
        }
        if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
                /* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
                return -EINVAL;
        }

        radeon_ttm_placement_from_domain(bo, domain);
        for (i = 0; i < bo->placement.num_placement; i++) {
                /* force to pin into visible video ram */
                if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
                    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
                        bo->placements[i].lpfn =
                                bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                else
                        bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }

        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = radeon_bo_gpu_offset(bo);
                if (domain == RADEON_GEM_DOMAIN_VRAM)
                        bo->rdev->vram_pin_size += radeon_bo_size(bo);
                else
                        bo->rdev->gart_pin_size += radeon_bo_size(bo);
        } else {
                dev_err(bo->rdev->dev, "%p pin failed\n", bo);
        }
        return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
        return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

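/* Decrement the pin count; when it reaches zero the NO_EVICT flag is
 * dropped and the BO becomes evictable again.
 */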
int radeon_bo_unpin(struct radeon_bo *bo)
{
        struct ttm_operation_ctx ctx = { false, false };
        int r, i;

        if (!bo->pin_count) {
                dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++) {
                bo->placements[i].lpfn = 0;
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        }
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (likely(r == 0)) {
                if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                        bo->rdev->vram_pin_size -= radeon_bo_size(bo);
                else
                        bo->rdev->gart_pin_size -= radeon_bo_size(bo);
        } else {
                dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        }
        return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
        if (0 && (rdev->flags & RADEON_IS_IGP)) {
                if (rdev->mc.igp_sideport_enabled == false)
                        /* Useless to evict on IGP chips */
                        return 0;
        }
        return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

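/* Emergency cleanup at teardown: complain about and release any GEM objects
 * that userspace failed to free.
 */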
void radeon_bo_force_delete(struct radeon_device *rdev)
{
        struct radeon_bo *bo, *n;

        if (list_empty(&rdev->gem.objects)) {
                return;
        }
        dev_err(rdev->dev, "Userspace still has active objects !\n");
        list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
                dev_err(rdev->dev, "%p %p %lu %lu force free\n",
                        &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
                        *((unsigned long *)&bo->gem_base.refcount));
                mutex_lock(&bo->rdev->gem.mutex);
                list_del_init(&bo->list);
                mutex_unlock(&bo->rdev->gem.mutex);
                /* this should unref the ttm bo */
                drm_gem_object_put_unlocked(&bo->gem_base);
        }
}

int radeon_bo_init(struct radeon_device *rdev)
{
        /* reserve PAT memory space to WC for VRAM */
        arch_io_reserve_memtype_wc(rdev->mc.aper_base,
                                   rdev->mc.aper_size);

        /* Add an MTRR for the VRAM */
        if (!rdev->fastfb_working) {
                rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
                                                      rdev->mc.aper_size);
        }
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                 rdev->mc.mc_vram_size >> 20,
                 (unsigned long long)rdev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %cDR\n",
                 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
        return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
        radeon_ttm_fini(rdev);
        arch_phys_wc_del(rdev->mc.vram_mtrr);
        arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
        u64 real_vram_size = rdev->mc.real_vram_size;
        u64 vram_usage = atomic64_read(&rdev->vram_usage);

        /* This function is based on the current VRAM usage.
         *
         * - If all of VRAM is free, allow relocating the number of bytes that
         *   is equal to 1/4 of the size of VRAM for this IB.
         *
         * - If more than one half of VRAM is occupied, only allow relocating
         *   1 MB of data for this IB.
         *
         * - From 0 to one half of used VRAM, the threshold decreases
         *   linearly.
         *          __________________
         * 1/4 of -|\                 |
         * VRAM    | \                |
         *         |  \               |
         *         |   \              |
         *         |    \             |
         *         |     \            |
         *         |      \           |
         *         |       \__________|1 MB
         *         |------------------|
         *    VRAM 0 %           100 %
         *         used          used
         *
         * Note: It's a threshold, not a limit. The threshold must be crossed
         * for buffer relocations to stop, so any buffer of an arbitrary size
         * can be moved as long as the threshold isn't crossed before
         * the relocation takes place. We don't want to disable buffer
         * relocations completely.
         *
         * The idea is that buffers should be placed in VRAM at creation time
         * and TTM should only do a minimum number of relocations during
         * command submission. In practice, you need to submit at least
         * a dozen IBs to move all buffers to VRAM if they are in GTT.
         *
         * Also, things can get pretty crazy under memory pressure and actual
         * VRAM usage can change a lot, so playing safe even at 50% does
         * consistently increase performance.
         */

        u64 half_vram = real_vram_size >> 1;
        u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
        u64 bytes_moved_threshold = half_free_vram >> 1;
        return max(bytes_moved_threshold, 1024*1024ull);
}

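/* Validate every BO on the CS reloc list into a domain it is allowed in,
 * throttling migrations with the threshold computed above.
 */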
int radeon_bo_list_validate(struct radeon_device *rdev,
                            struct ww_acquire_ctx *ticket,
                            struct list_head *head, int ring)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct radeon_bo_list *lobj;
        struct list_head duplicates;
        int r;
        u64 bytes_moved = 0, initial_bytes_moved;
        u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

        INIT_LIST_HEAD(&duplicates);
        r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
        if (unlikely(r != 0)) {
                return r;
        }

        list_for_each_entry(lobj, head, tv.head) {
                struct radeon_bo *bo = lobj->robj;
                if (!bo->pin_count) {
                        u32 domain = lobj->preferred_domains;
                        u32 allowed = lobj->allowed_domains;
                        u32 current_domain =
                                radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

                        /* Check if this buffer will be moved and don't move it
                         * if we have moved too many buffers for this IB already.
                         *
                         * Note that this allows moving at least one buffer of
                         * any size, because it doesn't take the current "bo"
                         * into account. We don't want to disallow buffer moves
                         * completely.
                         */
                        if ((allowed & current_domain) != 0 &&
                            (domain & current_domain) == 0 && /* will be moved */
                            bytes_moved > bytes_moved_threshold) {
                                /* don't move it */
                                domain = current_domain;
                        }

                retry:
                        radeon_ttm_placement_from_domain(bo, domain);
                        if (ring == R600_RING_TYPE_UVD_INDEX)
                                radeon_uvd_force_into_uvd_segment(bo, allowed);

                        initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                        bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
                                       initial_bytes_moved;

                        if (unlikely(r)) {
                                if (r != -ERESTARTSYS &&
                                    domain != lobj->allowed_domains) {
                                        domain = lobj->allowed_domains;
                                        goto retry;
                                }
                                ttm_eu_backoff_reservation(ticket, head);
                                return r;
                        }
                }
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
        }

        list_for_each_entry(lobj, &duplicates, tv.head) {
                lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
                lobj->tiling_flags = lobj->robj->tiling_flags;
        }

        return 0;
}

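/* Find a free hardware surface register slot (stealing one from an unpinned
 * BO if necessary) and program it with the BO's tiling parameters.
 */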
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;
        struct radeon_bo *old_object;
        int steal;
        int i;

        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (!bo->tiling_flags)
                return 0;

        if (bo->surface_reg >= 0) {
                reg = &rdev->surface_regs[bo->surface_reg];
                i = bo->surface_reg;
                goto out;
        }

        steal = -1;
        for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

                reg = &rdev->surface_regs[i];
                if (!reg->bo)
                        break;

                old_object = reg->bo;
                if (old_object->pin_count == 0)
                        steal = i;
        }

        /* if we are all out */
        if (i == RADEON_GEM_MAX_SURFACES) {
                if (steal == -1)
                        return -ENOMEM;
                /* find someone with a surface reg and nuke their BO */
                reg = &rdev->surface_regs[steal];
                old_object = reg->bo;
                /* blow away the mapping */
                DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
                ttm_bo_unmap_virtual(&old_object->tbo);
                old_object->surface_reg = -1;
                i = steal;
        }

        bo->surface_reg = i;
        reg->bo = bo;

out:
        radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
                               bo->tbo.mem.start << PAGE_SHIFT,
                               bo->tbo.num_pages << PAGE_SHIFT);
        return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;

        if (bo->surface_reg == -1)
                return;

        reg = &rdev->surface_regs[bo->surface_reg];
        radeon_clear_surface_reg(rdev, bo->surface_reg);

        reg->bo = NULL;
        bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
                               uint32_t tiling_flags, uint32_t pitch)
{
        struct radeon_device *rdev = bo->rdev;
        int r;

        if (rdev->family >= CHIP_CEDAR) {
                unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

                bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
                bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
                mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
                tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
                stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
                switch (bankw) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (bankh) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (mtaspect) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                if (tilesplit > 6) {
                        return -EINVAL;
                }
                if (stilesplit > 6) {
                        return -EINVAL;
                }
        }
        r = radeon_bo_reserve(bo, false);
        if (unlikely(r != 0))
                return r;
        bo->tiling_flags = tiling_flags;
        bo->pitch = pitch;
        radeon_bo_unreserve(bo);
        return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                                uint32_t *tiling_flags,
                                uint32_t *pitch)
{
        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
        if (pitch)
                *pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                           bool force_drop)
{
        if (!force_drop)
                lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
                return 0;

        if (force_drop) {
                radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
                if (!has_moved)
                        return 0;

                if (bo->surface_reg >= 0)
                        radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if ((bo->surface_reg >= 0) && !has_moved)
                return 0;

        return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                           bool evict,
                           struct ttm_mem_reg *new_mem)
{
        struct radeon_bo *rbo;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return;

        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 1);
        radeon_vm_bo_invalidate(rbo->rdev, rbo);

        /* update statistics */
        if (!new_mem)
                return;

        radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
        radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

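/* Called on a CPU page fault: if the faulting BO lives outside the
 * CPU-visible part of VRAM, try to migrate it into the visible window,
 * falling back to GTT when that fails for lack of space.
 */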
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct ttm_operation_ctx ctx = { false, false };
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
        unsigned long offset, size, lpfn;
        int i, r;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return 0;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
        rdev = rbo->rdev;
        if (bo->mem.mem_type != TTM_PL_VRAM)
                return 0;

        size = bo->mem.num_pages << PAGE_SHIFT;
        offset = bo->mem.start << PAGE_SHIFT;
        if ((offset + size) <= rdev->mc.visible_vram_size)
                return 0;

        /* Can't move a pinned BO to visible VRAM */
        if (rbo->pin_count > 0)
                return -EINVAL;

        /* hurrah the memory is not visible ! */
        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
        lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
        for (i = 0; i < rbo->placement.num_placement; i++) {
                /* Force into visible VRAM */
                if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
                        rbo->placements[i].lpfn = lpfn;
        }
        r = ttm_bo_validate(bo, &rbo->placement, &ctx);
        if (unlikely(r == -ENOMEM)) {
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
                return ttm_bo_validate(bo, &rbo->placement, &ctx);
        } else if (unlikely(r != 0)) {
                return r;
        }

        offset = bo->mem.start << PAGE_SHIFT;
        /* this should never happen */
        if ((offset + size) > rdev->mc.visible_vram_size)
                return -EINVAL;

        return 0;
}

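/* Reserve the BO, optionally report its current memory type, and wait for
 * the fences on it (or just poll when @no_wait is true).
 */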
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
        if (unlikely(r != 0))
                return r;
        if (mem_type)
                *mem_type = bo->tbo.mem.mem_type;

        r = ttm_bo_wait(&bo->tbo, true, no_wait);
        ttm_bo_unreserve(&bo->tbo);
        return r;
}

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
                     bool shared)
{
        struct reservation_object *resv = bo->tbo.resv;

        if (shared)
                reservation_object_add_shared_fence(resv, &fence->base);
        else
                reservation_object_add_excl_fence(resv, &fence->base);
}