/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/io.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <drm/drm_cache.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_trace.h"
#include "radeon_ttm.h"

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To protect against concurrent BO access we rely on bo_reserve exclusion,
 * as all functions call it.
 */

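/* Track per-domain memory usage; called with sign = +1 when a BO lands in a
 * domain and sign = -1 when it leaves.  rdev->vram_usage is consulted by the
 * bytes-moved threshold heuristic further down in this file.
 */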
static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

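/* Final TTM destroy callback: drops the BO from the device's GEM object
 * list, releases its surface register and GEM object, and frees the memory.
 */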
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON_ONCE(!list_empty(&bo->va));
	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

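/* A TTM BO belongs to radeon iff it uses our destroy callback. */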
bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

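/* Translate a RADEON_GEM_DOMAIN_* mask into a TTM placement list.  VRAM
 * placements can be restricted via fpfn/lpfn so that BOs which don't need
 * CPU access prefer the invisible part of VRAM, while CPU-accessible BOs
 * stay within the visible aperture.  Falls back to system memory when no
 * domain bit is set.
 */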
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		/* Try placing BOs which don't need CPU access outside of the
		 * CPU accessible part of VRAM
		 */
		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
			rbo->placements[c].fpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			rbo->placements[c].mem_type = TTM_PL_VRAM;
			rbo->placements[c++].flags = 0;
		}

		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_VRAM;
		rbo->placements[c++].flags = 0;
	}

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_TT;
		rbo->placements[c++].flags = 0;
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
		rbo->placements[c++].flags = 0;
	}
	if (!c) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
		rbo->placements[c++].flags = 0;
	}

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].mem_type == TTM_PL_VRAM) &&
		    !rbo->placements[i].fpfn)
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}
}

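/* Allocate and initialize a radeon BO of @size bytes (rounded up to a whole
 * page) in the given domain.  Write-combined GTT mappings are masked out on
 * configurations where they are unsupported or known to misbehave.
 */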
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, u32 flags, struct sg_table *sg,
		     struct dma_resv *resv,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
	 */
	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & RADEON_GEM_GTT_WC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, acc_size,
			sg, resv, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

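/* Map the whole BO into kernel address space, caching the mapping so that
 * repeated calls return the same pointer.
 */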
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;
	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

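/* Pin a BO into @domain, optionally restricted to GPU addresses below
 * @max_offset.  Pinning an already pinned BO just bumps the pin count;
 * userptr BOs are rejected, and a BO shared as a dma-buf cannot be pinned
 * into VRAM.
 */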
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))
		return -EPERM;

	if (bo->tbo.pin_count) {
		ttm_bo_pin(&bo->tbo);
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
		/* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}

	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].mem_type == TTM_PL_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0)) {
		ttm_bo_pin(&bo->tbo);
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

void radeon_bo_unpin(struct radeon_bo *bo)
{
	ttm_bo_unpin(&bo->tbo);
	if (!bo->tbo.pin_count) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	}
}

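/* Evict every BO from VRAM (typically around suspend).  On IGP chips
 * without sideport memory the "VRAM" is carved from system RAM, so there is
 * nothing useful to evict.
 */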
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	struct ttm_bo_device *bdev = &rdev->mman.bdev;
	struct ttm_resource_manager *man;

	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
#ifndef CONFIG_HIBERNATION
	if (rdev->flags & RADEON_IS_IGP) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
#endif
	man = ttm_manager_type(bdev, TTM_PL_VRAM);
	if (!man)
		return 0;
	return ttm_resource_manager_evict_all(bdev, man);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put(&bo->tbo.base);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(rdev->mc.aper_base,
				   rdev->mc.aper_size);

	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
	arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *          __________________
	 * 1/4 of -|\                 |
	 * VRAM    | \                |
	 *         |  \               |
	 *         |   \              |
	 *         |    \             |
	 *         |     \            |
	 *         |      \           |
	 *         |       \__________|1 MB
	 *         |------------------|
	 *    VRAM 0 %              100 %
	 *         used             used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

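	/* Example with assumed numbers: for 1024 MiB of VRAM with 256 MiB in
	 * use, half_vram = 512 MiB and half_free_vram = 256 MiB, giving a
	 * threshold of 128 MiB; at >= 512 MiB used it bottoms out at 1 MB.
	 */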
	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

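/* Reserve and validate every BO on the CS reloc list, trying each buffer's
 * preferred domains first and falling back to its allowed domains on
 * failure.  Buffers are only migrated between domains while the total bytes
 * moved for this IB stays under the threshold computed above.
 */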
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_bo_list *lobj;
	struct list_head duplicates;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	INIT_LIST_HEAD(&duplicates);
	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		struct radeon_bo *bo = lobj->robj;
		if (!bo->tbo.pin_count) {
			u32 domain = lobj->preferred_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}

	list_for_each_entry(lobj, &duplicates, tv.head) {
		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
		lobj->tiling_flags = lobj->robj->tiling_flags;
	}

	return 0;
}

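/* Bind the BO's tiling state to a hardware surface register, stealing the
 * register of an unpinned BO if all RADEON_GEM_MAX_SURFACES slots are taken.
 */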
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->tbo.pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

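/* Validate and store new tiling flags and pitch.  On evergreen (CEDAR and
 * newer) the bank width/height, macro tile aspect and tile split fields are
 * range-checked first.
 */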
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

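/* Keep the surface register in sync with BO placement: tiled surfaces need
 * a register only while they live in VRAM; force_drop releases it
 * unconditionally.
 */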
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	if (!force_drop)
		dma_resv_assert_held(bo->tbo.base.resv);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

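/* TTM move notification: drop the surface register, invalidate the BO's VM
 * mappings and update the per-domain usage statistics.
 */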
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

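/* CPU page fault handler for VRAM BOs: if the faulting BO lies outside the
 * CPU-visible aperture, try to migrate it into visible VRAM, falling back
 * to GTT when VRAM is full.  Pinned BOs outside the aperture get SIGBUS.
 */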
vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (rbo->tbo.pin_count > 0)
		return VM_FAULT_SIGBUS;

	/* hurrah the memory is not visible! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < rbo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((rbo->placements[i].mem_type == TTM_PL_VRAM) &&
		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
			rbo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(bo, &rbo->placement, &ctx);
	} else if (likely(!r)) {
		offset = bo->mem.start << PAGE_SHIFT;
		/* this should never happen */
		if ((offset + size) > rdev->mc.visible_vram_size)
			return VM_FAULT_SIGBUS;
	}

	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(r))
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;

	if (shared)
		dma_resv_add_shared_fence(resv, &fence->base);
	else
		dma_resv_add_excl_fence(resv, &fence->base);
}