/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */
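
/*
 * Illustrative usage sketch (not part of the original file): callers are
 * expected to bracket BO accesses with the reserve/unreserve helpers from
 * radeon_object.h, which wrap ttm_bo_reserve()/ttm_bo_unreserve():
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	r = radeon_bo_kmap(bo, &ptr);
 *	...
 *	radeon_bo_unreserve(bo);
 */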

static void radeon_bo_clear_va(struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va, *tmp;

	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
		/* remove from all vm address space */
		radeon_vm_bo_rmv(bo->rdev, bo_va);
	}
}

static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	radeon_bo_clear_va(bo);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}
	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	/*
	 * Use two-ended allocation depending on the buffer size to
	 * improve fragmentation quality.
	 * 512KB was measured as the optimal threshold.
	 */
	if (rbo->tbo.mem.size > 512 * 1024) {
		for (i = 0; i < c; i++) {
			rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN;
		}
	}
}
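
/*
 * Example (illustrative, not part of the original file): for
 * domain = RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT on a non-AGP
 * board with no special GTT caching flags, the function above produces
 * two placements, tried in order:
 *
 *	placements[0] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
 *	placements[1] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
 *
 * so TTM prefers VRAM and falls back to cached GTT under pressure; BOs
 * larger than 512KB additionally get TTM_PL_FLAG_TOPDOWN OR'ed into each
 * placement.
 */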

int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}
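
/*
 * Illustrative sketch (assumptions: a kernel-internal, page-aligned
 * allocation; error paths abbreviated) of how a BO is typically created,
 * pinned, and CPU-mapped inside the driver:
 *
 *	struct radeon_bo *bo;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
 *			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
 *	if (r)
 *		return r;
 *	r = radeon_bo_reserve(bo, false);
 *	if (r == 0) {
 *		r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *		if (r == 0)
 *			r = radeon_bo_kmap(bo, &cpu_ptr);
 *		radeon_bo_unreserve(bo);
 *	}
 */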

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	down_read(&rdev->pm.mclk_lock);
	ttm_bo_unref(&tbo);
	up_read(&rdev->pm.mclk_lock);
	if (tbo == NULL)
		*bo = NULL;
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	if (max_offset) {
		u64 lpfn = max_offset >> PAGE_SHIFT;

		if (!bo->placement.lpfn)
			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

		if (lpfn < bo->placement.lpfn)
			bo->placement.lpfn = lpfn;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}
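
/*
 * Illustrative sketch (hypothetical call site, not from the original
 * file): max_offset caps the last valid page frame, so a caller that
 * must keep a pinned BO within CPU-visible VRAM could do:
 *
 *	r = radeon_bo_pin_restricted(bo, RADEON_GEM_DOMAIN_VRAM,
 *				     rdev->mc.visible_vram_size, &gpu_addr);
 *
 * A max_offset of 0, as radeon_bo_pin() passes, imposes no bound beyond
 * the domain itself.
 */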

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *        __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}
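
/*
 * Worked example (illustrative numbers, not from the original file):
 * with real_vram_size = 1024 MB and vram_usage = 256 MB,
 * half_vram = 512 MB and half_free_vram = 256 MB, so the threshold is
 * 128 MB of moves per IB. Once usage reaches 512 MB (50%),
 * half_free_vram is 0 and the threshold clamps to the 1 MB floor.
 */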

int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_cs_reloc *lobj;
	struct radeon_bo *bo;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	r = ttm_eu_reserve_buffers(ticket, head);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((lobj->allowed_domains & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}
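
/*
 * Illustrative sketch (inferred from the unpacking above, not from the
 * original file): each tiling parameter occupies a shifted/masked field
 * in "tiling_flags", so a caller would pack e.g. bankw = 1, bankh = 2 as:
 *
 *	flags |= (1 & RADEON_TILING_EG_BANKW_MASK) << RADEON_TILING_EG_BANKW_SHIFT;
 *	flags |= (2 & RADEON_TILING_EG_BANKH_MASK) << RADEON_TILING_EG_BANKH_SHIFT;
 *
 * On CHIP_CEDAR and newer, bankw/bankh/mtaspect must decode to one of
 * {0, 1, 2, 4, 8} and the tile-split fields to at most 6, or the
 * function rejects the flags with -EINVAL.
 */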

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* hurrah, the memory is not visible! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	r = ttm_bo_validate(bo, &rbo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0))
		return r;
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}