/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */

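/* Bookkeeping helper: adjust the device-wide GTT/VRAM usage counters by
 * the BO's size; sign > 0 accounts an allocation, sign < 0 a release.
 * The vram_usage counter feeds radeon_bo_get_threshold_for_moves() below.
 */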
static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON(!list_empty(&bo->va));
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

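/* Build the TTM placement list for @domain. VRAM placements are added
 * first so they take priority, with an extra placement outside the
 * CPU-visible window for RADEON_GEM_NO_CPU_ACCESS BOs; GTT and SYSTEM
 * placements derive their caching flags from RADEON_GEM_GTT_UC/WC.
 * The final loop caps CPU-accessible VRAM placements at the visible
 * VRAM boundary.
 */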
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		/* Try placing BOs which don't need CPU access outside of the
		 * CPU accessible part of VRAM
		 */
		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
			rbo->placements[c].fpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_VRAM;
		}

		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;
	}

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_TT;
		}
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;
	}

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !rbo->placements[i].fpfn)
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}
}

int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, u32 flags, struct sg_table *sg,
		     struct reservation_object *resv,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
	 */
	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining

	DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
		      "better performance thanks to write-combining\n");
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#endif

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}
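
/* Minimal usage sketch (illustrative only, error handling elided):
 *
 *	struct radeon_bo *bo;
 *	int r = radeon_bo_create(rdev, 4096, PAGE_SIZE, true,
 *				 RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, &bo);
 *
 * kernel=true yields a ttm_bo_type_kernel BO, which the caller would
 * typically pin and kmap before use.
 */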

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}
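
/* Typical kmap pattern (illustrative sketch; assumes the BO is pinned
 * and reserved, as ttm_bo_kmap requires):
 *
 *	void *ptr;
 *	r = radeon_bo_kmap(bo, &ptr);
 *	if (r == 0) {
 *		memset(ptr, 0, radeon_bo_size(bo));
 *		radeon_bo_kunmap(bo);
 *	}
 */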

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}
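
/* Pin/unpin sketch (illustrative; assumes the caller holds the BO's
 * reservation):
 *
 *	u64 gpu_addr;
 *	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	if (r == 0) {
 *		// ... program the hardware with gpu_addr ...
 *		radeon_bo_unpin(bo);
 *	}
 *
 * Pins nest: only the last radeon_bo_unpin() drops TTM_PL_FLAG_NO_EVICT.
 */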

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *         __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
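	 *
	 * Worked example (illustrative numbers): with 1024 MB of VRAM and
	 * 256 MB in use, half_vram = 512 MB and half_free_vram = 256 MB,
	 * so the threshold is 128 MB for this IB; at 0% usage it would be
	 * 256 MB (1/4 of VRAM), and at 50% usage or more it falls back to
	 * the 1 MB floor below.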
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

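/* Reserve every BO on @head, then validate each unpinned BO into its
 * preferred domain, retrying with the allowed domains on failure.
 * Once bytes_moved crosses the per-IB threshold computed above, BOs
 * that already sit in an allowed domain are left where they are.
 */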
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_bo_list *lobj;
	struct list_head duplicates;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	INIT_LIST_HEAD(&duplicates);
	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		struct radeon_bo *bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}

	list_for_each_entry(lobj, &duplicates, tv.head) {
		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
		lobj->tiling_flags = lobj->robj->tiling_flags;
	}

	return 0;
}

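/* Allocate one of the RADEON_GEM_MAX_SURFACES hardware surface registers
 * for this BO's tiling layout, stealing a register from an unpinned BO
 * if they are all taken. Caller must hold the BO's reservation.
 */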
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

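/* On CHIP_CEDAR and newer, the bank width/height and macro tile aspect
 * fields packed into @tiling_flags must each be 0, 1, 2, 4 or 8, and
 * the tile split and stencil tile split fields must not exceed 6.
 */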
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

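/* Called on a CPU page fault into the BO: if it lies beyond the
 * CPU-visible part of VRAM, revalidate it into visible VRAM, falling
 * back to GTT when visible VRAM is exhausted (-ENOMEM).
 */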
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* hurrah the memory is not visible ! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < rbo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
			rbo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &rbo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

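/* Wait for all fences on the BO to signal; with @no_wait, poll instead
 * and return -EBUSY if the BO is still busy. Optionally reports the
 * BO's current TTM memory type via @mem_type.
 */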
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0))
		return r;
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;

	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, &fence->base);
	else
		reservation_object_add_excl_fence(resv, &fence->base);
}