/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

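/*
 * amdgpu_get_vis_part_size - how much of a BO lies in visible VRAM
 *
 * Returns the number of bytes of @mem that fall inside the CPU-visible
 * part of VRAM, i.e. below adev->mc.visible_vram_size.
 */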
static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
				    struct ttm_mem_reg *mem)
{
	if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
		return 0;

	return ((mem->start << PAGE_SHIFT) + mem->size) >
		adev->mc.visible_vram_size ?
		adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
		mem->size;
}

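/*
 * amdgpu_update_memory_usage - bookkeeping for GTT/VRAM usage
 *
 * Adds the size of @new_mem to and subtracts the size of @old_mem from
 * the per-device usage counters; visible VRAM is tracked separately.
 */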
static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
				       struct ttm_mem_reg *old_mem,
				       struct ttm_mem_reg *new_mem)
{
	u64 vis_size;
	if (!adev)
		return;

	if (new_mem) {
		switch (new_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_add(new_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_add(new_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
			atomic64_add(vis_size, &adev->vram_vis_usage);
			break;
		}
	}

	if (old_mem) {
		switch (old_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_sub(old_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_sub(old_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
			atomic64_sub(vis_size, &adev->vram_vis_usage);
			break;
		}
	}
}

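/*
 * amdgpu_ttm_bo_destroy - TTM destroy callback
 *
 * Final release of an amdgpu BO: drops the usage statistics, the GEM
 * object, the parent and shadow-list references, and frees the metadata
 * and the BO itself.
 */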
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);

	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&bo->adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&bo->adev->shadow_list_lock);
	}
	kfree(bo->metadata);
	kfree(bo);
}

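/*
 * amdgpu_ttm_bo_is_amdgpu_bo - check if a TTM BO belongs to amdgpu
 *
 * Identifies amdgpu BOs by their destroy callback, so foreign TTM
 * objects are never mistaken for amdgpu ones.
 */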
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_ttm_bo_destroy)
		return true;
	return false;
}

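/*
 * amdgpu_ttm_placement_init - fill a ttm_placement from domain and flags
 *
 * Translates an AMDGPU_GEM_DOMAIN_* mask and AMDGPU_GEM_CREATE_* flags
 * into an ordered list of TTM places. For VRAM, an extra entry above the
 * CPU-visible limit is tried first when no CPU access is needed; with no
 * domain at all the BO falls back to system memory.
 */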
static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
				      struct ttm_placement *placement,
				      struct ttm_place *places,
				      u32 domain, u64 flags)
{
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;

		if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
		    !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
		    adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			places[c].fpfn = visible_pfn;
			places[c].lpfn = 0;
			places[c].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
				TTM_PL_FLAG_TOPDOWN;
			c++;
		}

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}

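/*
 * amdgpu_ttm_placement_from_domain - set a BO's placement for a domain
 *
 * Convenience wrapper that initializes @abo's embedded placement from
 * the given domain and the BO's own creation flags.
 */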
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	amdgpu_ttm_placement_init(abo->adev, &abo->placement,
				  abo->placements, domain, abo->flags);
}

static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
					struct ttm_placement *placement)
{
	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));

	memcpy(bo->placements, placement->placement,
	       placement->num_placement * sizeof(struct ttm_place));
	bo->placement.num_placement = placement->num_placement;
	bo->placement.num_busy_placement = placement->num_busy_placement;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: resulting BO
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create(adev, size, align, true, domain,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, bo_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unreserve;
		}
	}

	amdgpu_bo_unreserve(*bo_ptr);

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	amdgpu_bo_unref(bo_ptr);

	return r;
}

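/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * allocate a page of pinned GTT memory with both GPU and CPU mappings,
 * then release it again with amdgpu_bo_free_kernel() below.
 *
 *	struct amdgpu_bo *bo;
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *	memset(cpu_addr, 0, PAGE_SIZE);
 *	...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
 */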

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: optional pointer to the stored GPU address, set to 0
 * @cpu_addr: optional pointer to the stored CPU mapping, set to NULL
 *
 * Unmaps, unpins and unreferences a BO that was created for kernel
 * internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, false) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

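/*
 * amdgpu_bo_create_restricted - create a BO with an explicit placement
 *
 * Like amdgpu_bo_create(), but takes a caller-provided ttm_placement
 * instead of deriving one from the domain, which allows restricting the
 * BO to a specific range of the address space.
 */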
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct reservation_object *resv,
				struct amdgpu_bo **bo_ptr)
{
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align;
	size_t acc_size;
	int r;

	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->adev = adev;
	INIT_LIST_HEAD(&bo->shadow_list);
	INIT_LIST_HEAD(&bo->va);
	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT |
					 AMDGPU_GEM_DOMAIN_CPU |
					 AMDGPU_GEM_DOMAIN_GDS |
					 AMDGPU_GEM_DOMAIN_GWS |
					 AMDGPU_GEM_DOMAIN_OA);
	bo->allowed_domains = bo->prefered_domains;
	if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = flags;

	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	amdgpu_fill_placement_to_bo(bo, placement);
	/* Kernel allocations are uninterruptible */
	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		return r;
	}

	if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		if (adev->mman.buffer_funcs_ring == NULL ||
		    !adev->mman.buffer_funcs_ring->ready) {
			r = -EBUSY;
			goto fail_free;
		}

		r = amdgpu_bo_reserve(bo, false);
		if (unlikely(r != 0))
			goto fail_free;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
		if (unlikely(r != 0))
			goto fail_unreserve;

		amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
		amdgpu_bo_fence(bo, fence, false);
		amdgpu_bo_unreserve(bo);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	return 0;

fail_unreserve:
	amdgpu_bo_unreserve(bo);
fail_free:
	amdgpu_bo_unref(&bo);
	return r;
}

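/*
 * amdgpu_bo_create_shadow - create a GTT shadow for a BO
 *
 * Allocates a write-combined GTT copy of @bo that shares its reservation
 * object, links it as bo->shadow and puts it on the device shadow list.
 * The shadow is used to back up and restore VRAM contents around GPU
 * resets.
 */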
static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size, int byte_align,
				   struct amdgpu_bo *bo)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	int r;

	if (bo->shadow)
		return 0;

	bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, AMDGPU_GEM_DOMAIN_GTT,
				  AMDGPU_GEM_CREATE_CPU_GTT_USWC);

	r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
					AMDGPU_GEM_DOMAIN_GTT,
					AMDGPU_GEM_CREATE_CPU_GTT_USWC,
					NULL, &placement,
					bo->tbo.resv,
					&bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

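/*
 * amdgpu_bo_create - create an amdgpu BO
 *
 * Builds a placement from @domain and @flags, creates the BO and, if the
 * device needs backup and AMDGPU_GEM_CREATE_SHADOW is set, also creates
 * the GTT shadow for it.
 */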
int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	int r;

	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, domain, flags);

	r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
					domain, flags, sg, &placement,
					resv, bo_ptr);
	if (r)
		return r;

	if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
		r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}

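/*
 * amdgpu_bo_backup_to_shadow - copy a BO's content to its shadow
 *
 * Schedules a copy from @bo to its GTT shadow on @ring and, on success,
 * attaches the resulting fence to the BO as a shared fence.
 */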
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence,
			       bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

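/*
 * amdgpu_bo_restore_from_shadow - copy a shadow's content back to its BO
 *
 * Counterpart of amdgpu_bo_backup_to_shadow(): schedules a copy from the
 * GTT shadow back into @bo, e.g. to restore VRAM after a GPU reset.
 */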
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

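/*
 * amdgpu_bo_kmap - map a BO into kernel address space
 *
 * Waits for pending moves, maps the whole BO through TTM and optionally
 * returns the kernel virtual address in @ptr. Fails with -EPERM for BOs
 * created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS.
 */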
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	bool is_iomem;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;

	return 0;
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

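/*
 * amdgpu_bo_pin_restricted - pin a BO within a given address range
 *
 * Pins @bo into @domain between @min_offset and @max_offset (0 means no
 * limit) and optionally returns the pinned GPU address in @gpu_addr.
 * Pinning an already pinned BO only bumps the pin count, but the domain
 * must match. BOs that still allow CPU access are forced into visible
 * VRAM.
 */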
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;
	unsigned fpfn, lpfn;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (domain != amdgpu_mem_type_to_domain(mem_type))
			return -EINVAL;

		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset >
		     bo->adev->mc.visible_vram_size)) {
			if (WARN_ON_ONCE(min_offset >
					 bo->adev->mc.visible_vram_size))
				return -EINVAL;
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
		} else {
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = max_offset >> PAGE_SHIFT;
		}
		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r)) {
		dev_err(bo->adev->dev, "%p pin failed\n", bo);
		goto error;
	}
	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
	if (unlikely(r)) {
		dev_err(bo->adev->dev, "%p bind failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;
	if (gpu_addr != NULL)
		*gpu_addr = amdgpu_bo_gpu_offset(bo);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		bo->adev->vram_pin_size += amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		bo->adev->gart_pin_size += amdgpu_bo_size(bo);
	}

error:
	return r;
}

int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

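/*
 * amdgpu_bo_unpin - decrement a BO's pin count
 *
 * Drops one pin reference; when the count reaches zero the NO_EVICT flag
 * is cleared, the BO is revalidated and the pin-size accounting is
 * updated.
 */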
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r)) {
		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
		goto error;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
	}

error:
	return r;
}

int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3"
};

int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* Add an MTRR for the VRAM */
	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->mc.mc_vram_size >> 20,
		 (unsigned long long)adev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
	return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->mc.vram_mtrr);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

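/*
 * amdgpu_bo_move_notify - TTM move notification
 *
 * Called before a BO actually moves; invalidates the VM mappings of the
 * BO and updates the memory usage statistics for the old and new
 * placement.
 */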
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	amdgpu_vm_bo_invalidate(abo->adev, abo);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);

	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

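/*
 * amdgpu_bo_fault_reserve_notify - TTM fault notification
 *
 * Called on a CPU page fault; if the faulting BO lies outside of the
 * CPU-visible part of VRAM it is revalidated into visible VRAM, falling
 * back to GTT when that fails. Pinned BOs cannot be moved and return
 * -EINVAL.
 */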
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	adev = abo->adev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible! */
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
	lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < abo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!abo->placements[i].lpfn ||
		     abo->placements[i].lpfn > lpfn))
			abo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &abo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &abo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > adev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns current GPU offset of the object.
 *
 * Note: object should either be pinned or reserved when calling this
 * function, it might be useful to add check for this for debugging.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
		     !amdgpu_ttm_is_bound(bo->tbo.ttm));
	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);

	return bo->tbo.offset;
}