/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include "radeon_reg.h"
#include "radeon.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);

static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
        struct radeon_mman *mman;
        struct radeon_device *rdev;

        mman = container_of(bdev, struct radeon_mman, bdev);
        rdev = container_of(mman, struct radeon_device, mman);
        return rdev;
}


/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

static int radeon_ttm_global_init(struct radeon_device *rdev)
{
        struct drm_global_reference *global_ref;
        int r;

        rdev->mman.mem_global_referenced = false;
        global_ref = &rdev->mman.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &radeon_ttm_mem_global_init;
        global_ref->release = &radeon_ttm_mem_global_release;
        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM memory accounting "
                          "subsystem.\n");
                return r;
        }

        rdev->mman.bo_global_ref.mem_glob =
                rdev->mman.mem_global_ref.object;
        global_ref = &rdev->mman.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;
        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
                drm_global_item_unref(&rdev->mman.mem_global_ref);
                return r;
        }

        rdev->mman.mem_global_referenced = true;
        return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
        if (rdev->mman.mem_global_referenced) {
                drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
                drm_global_item_unref(&rdev->mman.mem_global_ref);
                rdev->mman.mem_global_referenced = false;
        }
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        return 0;
}

static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                struct ttm_mem_type_manager *man)
{
        struct radeon_device *rdev;

        rdev = radeon_get_rdev(bdev);

        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_TT:
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = rdev->mc.gtt_start;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if IS_ENABLED(CONFIG_AGP)
                if (rdev->flags & RADEON_IS_AGP) {
                        if (!rdev->ddev->agp) {
                                DRM_ERROR("AGP is not enabled for memory type %u\n",
                                          (unsigned)type);
                                return -EINVAL;
                        }
                        if (!rdev->ddev->agp->cant_use_aperture)
                                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                                 TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
                }
#endif
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = rdev->mc.vram_start;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

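/* Pick replacement placements for a BO that TTM is about to evict: BOs that
 * are not radeon BOs simply go to system memory; VRAM BOs are steered to the
 * CPU-inaccessible part of VRAM or to GTT (with GTT as the only busy
 * placement); everything else falls back to the CPU domain.
 */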
static void radeon_evict_flags(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement)
{
        static struct ttm_place placements = {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
        };

        struct radeon_bo *rbo;

        if (!radeon_ttm_bo_is_radeon_bo(bo)) {
                placement->placement = &placements;
                placement->busy_placement = &placements;
                placement->num_placement = 1;
                placement->num_busy_placement = 1;
                return;
        }
        rbo = container_of(bo, struct radeon_bo, tbo);
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
                else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
                         bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
                        unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                        int i;

                        /* Try evicting to the CPU inaccessible part of VRAM
                         * first, but only set GTT as busy placement, so this
                         * BO will be evicted to GTT rather than causing other
                         * BOs to be evicted from VRAM
                         */
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
                                                         RADEON_GEM_DOMAIN_GTT);
                        rbo->placement.num_busy_placement = 0;
                        for (i = 0; i < rbo->placement.num_placement; i++) {
                                if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
                                        if (rbo->placements[0].fpfn < fpfn)
                                                rbo->placements[0].fpfn = fpfn;
                                } else {
                                        rbo->placement.busy_placement =
                                                &rbo->placements[i];
                                        rbo->placement.num_busy_placement = 1;
                                }
                        }
                } else
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
                break;
        case TTM_PL_TT:
        default:
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
        }
        *placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);

        if (radeon_ttm_tt_has_userptr(bo->ttm))
                return -EPERM;
        return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}

static void radeon_move_null(struct ttm_buffer_object *bo,
                             struct ttm_mem_reg *new_mem)
{
        struct ttm_mem_reg *old_mem = &bo->mem;

        BUG_ON(old_mem->mm_node != NULL);
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
}

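/* Copy a BO between GPU-addressable placements using the ASIC copy ring:
 * translate the old/new TTM offsets into GPU addresses, issue radeon_copy()
 * and hand the resulting fence to ttm_bo_move_accel_cleanup().
 */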
static int radeon_move_blit(struct ttm_buffer_object *bo,
                        bool evict, bool no_wait_gpu,
                        struct ttm_mem_reg *new_mem,
                        struct ttm_mem_reg *old_mem)
{
        struct radeon_device *rdev;
        uint64_t old_start, new_start;
        struct radeon_fence *fence;
        unsigned num_pages;
        int r, ridx;

        rdev = radeon_get_rdev(bo->bdev);
        ridx = radeon_copy_ring_index(rdev);
        old_start = old_mem->start << PAGE_SHIFT;
        new_start = new_mem->start << PAGE_SHIFT;

        switch (old_mem->mem_type) {
        case TTM_PL_VRAM:
                old_start += rdev->mc.vram_start;
                break;
        case TTM_PL_TT:
                old_start += rdev->mc.gtt_start;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
                return -EINVAL;
        }
        switch (new_mem->mem_type) {
        case TTM_PL_VRAM:
                new_start += rdev->mc.vram_start;
                break;
        case TTM_PL_TT:
                new_start += rdev->mc.gtt_start;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
                return -EINVAL;
        }
        if (!rdev->ring[ridx].ready) {
                DRM_ERROR("Trying to move memory with ring turned off.\n");
                return -EINVAL;
        }

        BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

        num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
        fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
        radeon_fence_unref(&fence);
        return r;
}

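/* VRAM -> system move: blit the BO into a temporary GTT placement first so
 * the GPU can copy it out of VRAM, then let TTM finish the move to system
 * memory.
 */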
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible,
                                bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
{
        struct radeon_device *rdev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg tmp_mem;
        struct ttm_place placements;
        struct ttm_placement placement;
        int r;

        rdev = radeon_get_rdev(bo->bdev);
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        placement.num_placement = 1;
        placement.placement = &placements;
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements.fpfn = 0;
        placements.lpfn = 0;
        placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                             interruptible, no_wait_gpu);
        if (unlikely(r)) {
                return r;
        }

        r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
        if (unlikely(r)) {
                goto out_cleanup;
        }

        r = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
out_cleanup:
        ttm_bo_mem_put(bo, &tmp_mem);
        return r;
}

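/* System -> VRAM move, the reverse path: bind the BO to a temporary GTT
 * placement first, then blit it into VRAM with the copy ring.
 */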
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible,
                                bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
{
        struct radeon_device *rdev;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg tmp_mem;
        struct ttm_placement placement;
        struct ttm_place placements;
        int r;

        rdev = radeon_get_rdev(bo->bdev);
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        placement.num_placement = 1;
        placement.placement = &placements;
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements.fpfn = 0;
        placements.lpfn = 0;
        placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                             interruptible, no_wait_gpu);
        if (unlikely(r)) {
                return r;
        }
        r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
out_cleanup:
        ttm_bo_mem_put(bo, &tmp_mem);
        return r;
}

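/* Top-level TTM move callback: take the cheap null/bind paths where possible,
 * otherwise try a GPU blit and fall back to ttm_bo_move_memcpy() when no copy
 * ring is usable or the blit fails.
 */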
static int radeon_bo_move(struct ttm_buffer_object *bo,
                        bool evict, bool interruptible,
                        bool no_wait_gpu,
                        struct ttm_mem_reg *new_mem)
{
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int r;

        r = ttm_bo_wait(bo, interruptible, no_wait_gpu);
        if (r)
                return r;

        /* Can't move a pinned BO */
        rbo = container_of(bo, struct radeon_bo, tbo);
        if (WARN_ON_ONCE(rbo->pin_count > 0))
                return -EINVAL;

        rdev = radeon_get_rdev(bo->bdev);
        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                radeon_move_null(bo, new_mem);
                return 0;
        }
        if ((old_mem->mem_type == TTM_PL_TT &&
             new_mem->mem_type == TTM_PL_SYSTEM) ||
            (old_mem->mem_type == TTM_PL_SYSTEM &&
             new_mem->mem_type == TTM_PL_TT)) {
                /* bind is enough */
                radeon_move_null(bo, new_mem);
                return 0;
        }
        if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
            rdev->asic->copy.copy == NULL) {
                /* use memcpy */
                goto memcpy;
        }

        if (old_mem->mem_type == TTM_PL_VRAM &&
            new_mem->mem_type == TTM_PL_SYSTEM) {
                r = radeon_move_vram_ram(bo, evict, interruptible,
                                        no_wait_gpu, new_mem);
        } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
                   new_mem->mem_type == TTM_PL_VRAM) {
                r = radeon_move_ram_vram(bo, evict, interruptible,
                                            no_wait_gpu, new_mem);
        } else {
                r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
        }

        if (r) {
memcpy:
                r = ttm_bo_move_memcpy(bo, evict, interruptible,
                                       no_wait_gpu, new_mem);
                if (r) {
                        return r;
                }
        }

        /* update statistics */
        atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
        return 0;
}

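/* Describe the bus placement (base, offset, is_iomem) TTM needs to create CPU
 * mappings: system pages are not I/O memory, while the AGP aperture and the
 * CPU-visible part of VRAM are.
 */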
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct radeon_device *rdev = radeon_get_rdev(bdev);

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
                if (rdev->flags & RADEON_IS_AGP) {
                        /* RADEON_IS_AGP is set only if AGP is active */
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = rdev->mc.agp_base;
                        mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
                }
#endif
                break;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                /* check if it's visible */
                if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
                        return -EINVAL;
                mem->bus.base = rdev->mc.aper_base;
                mem->bus.is_iomem = true;
#ifdef __alpha__
                /*
                 * Alpha: use bus.addr to hold the ioremap() return,
                 * so we can modify bus.base below.
                 */
                if (mem->placement & TTM_PL_FLAG_WC)
                        mem->bus.addr =
                                ioremap_wc(mem->bus.base + mem->bus.offset,
                                           mem->bus.size);
                else
                        mem->bus.addr =
                                ioremap_nocache(mem->bus.base + mem->bus.offset,
                                                mem->bus.size);

                /*
                 * Alpha: Use just the bus offset plus
                 * the hose/domain memory base for bus.base.
                 * It then can be used to build PTEs for VRAM
                 * access, as done in ttm_bo_vm_fault().
                 */
                mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
                        rdev->ddev->hose->dense_mem_base;
#endif
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
        struct ttm_dma_tt               ttm;
        struct radeon_device            *rdev;
        u64                             offset;

        uint64_t                        userptr;
        struct mm_struct                *usermm;
        uint32_t                        userflags;
};

/* prepare the sg table with the user pages */
static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
        struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
        struct radeon_ttm_tt *gtt = (void *)ttm;
        unsigned pinned = 0, nents;
        int r;

        int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
        enum dma_data_direction direction = write ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

        if (current->mm != gtt->usermm)
                return -EPERM;

        if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
                /* check that we only pin down anonymous memory
                   to prevent problems with writeback */
                unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
                struct vm_area_struct *vma;
                vma = find_vma(gtt->usermm, gtt->userptr);
                if (!vma || vma->vm_file || vma->vm_end < end)
                        return -EPERM;
        }

        do {
                unsigned num_pages = ttm->num_pages - pinned;
                uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
                struct page **pages = ttm->pages + pinned;

                r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
                if (r < 0)
                        goto release_pages;

                pinned += r;

        } while (pinned < ttm->num_pages);

        r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
                                      ttm->num_pages << PAGE_SHIFT,
                                      GFP_KERNEL);
        if (r)
                goto release_sg;

        r = -ENOMEM;
        nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
        if (nents != ttm->sg->nents)
                goto release_sg;

        drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                         gtt->ttm.dma_address, ttm->num_pages);

        return 0;

release_sg:
        kfree(ttm->sg);

release_pages:
        release_pages(ttm->pages, pinned, 0);
        return r;
}

static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
        struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
        struct radeon_ttm_tt *gtt = (void *)ttm;
        struct sg_page_iter sg_iter;

        int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
        enum dma_data_direction direction = write ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

        /* double check that we don't free the table twice */
        if (!ttm->sg->sgl)
                return;

        /* free the sg table and pages again */
        dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

        for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
                struct page *page = sg_page_iter_page(&sg_iter);
                if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
                        set_page_dirty(page);

                mark_page_accessed(page);
                put_page(page);
        }

        sg_free_table(ttm->sg);
}

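/* GART bind callback: map the TT pages at the BO's GTT offset.  Userptr pages
 * are pinned here and are mapped without GPU write permission.
 */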
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
                                   struct ttm_mem_reg *bo_mem)
{
        struct radeon_ttm_tt *gtt = (void*)ttm;
        uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
                RADEON_GART_PAGE_WRITE;
        int r;

        if (gtt->userptr) {
                radeon_ttm_tt_pin_userptr(ttm);
                flags &= ~RADEON_GART_PAGE_WRITE;
        }

        gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
        if (!ttm->num_pages) {
                WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
                     ttm->num_pages, bo_mem, ttm);
        }
        if (ttm->caching_state == tt_cached)
                flags |= RADEON_GART_PAGE_SNOOP;
        r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
                             ttm->pages, gtt->ttm.dma_address, flags);
        if (r) {
                DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
                          ttm->num_pages, (unsigned)gtt->offset);
                return r;
        }
        return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
        struct radeon_ttm_tt *gtt = (void *)ttm;

        radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);

        if (gtt->userptr)
                radeon_ttm_tt_unpin_userptr(ttm);

        return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
        struct radeon_ttm_tt *gtt = (void *)ttm;

        ttm_dma_tt_fini(&gtt->ttm);
        kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
        .bind = &radeon_ttm_backend_bind,
        .unbind = &radeon_ttm_backend_unbind,
        .destroy = &radeon_ttm_backend_destroy,
};

static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
                                    unsigned long size, uint32_t page_flags,
                                    struct page *dummy_read_page)
{
        struct radeon_device *rdev;
        struct radeon_ttm_tt *gtt;

        rdev = radeon_get_rdev(bdev);
#if IS_ENABLED(CONFIG_AGP)
        if (rdev->flags & RADEON_IS_AGP) {
                return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
                                         size, page_flags, dummy_read_page);
        }
#endif

        gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
        if (gtt == NULL) {
                return NULL;
        }
        gtt->ttm.ttm.func = &radeon_backend_func;
        gtt->rdev = rdev;
        if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
                kfree(gtt);
                return NULL;
        }
        return &gtt->ttm.ttm;
}

static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
{
        if (!ttm || ttm->func != &radeon_backend_func)
                return NULL;
        return (struct radeon_ttm_tt *)ttm;
}

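/* Allocate and DMA-map the backing pages of a TT object.  Userptr and
 * imported (SG) objects only need their sg table handled; other objects are
 * populated through the AGP, swiotlb DMA or regular TTM page pools.
 */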
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
        struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
        struct radeon_device *rdev;
        unsigned i;
        int r;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (ttm->state != tt_unpopulated)
                return 0;

        if (gtt && gtt->userptr) {
                ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
                if (!ttm->sg)
                        return -ENOMEM;

                ttm->page_flags |= TTM_PAGE_FLAG_SG;
                ttm->state = tt_unbound;
                return 0;
        }

        if (slave && ttm->sg) {
                drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                                 gtt->ttm.dma_address, ttm->num_pages);
                ttm->state = tt_unbound;
                return 0;
        }

        rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
        if (rdev->flags & RADEON_IS_AGP) {
                return ttm_agp_tt_populate(ttm);
        }
#endif

#ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
                return ttm_dma_populate(&gtt->ttm, rdev->dev);
        }
#endif

        r = ttm_pool_populate(ttm);
        if (r) {
                return r;
        }

        for (i = 0; i < ttm->num_pages; i++) {
                gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
                                                       0, PAGE_SIZE,
                                                       PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
                        while (i--) {
                                pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                                gtt->ttm.dma_address[i] = 0;
                        }
                        ttm_pool_unpopulate(ttm);
                        return -EFAULT;
                }
        }
        return 0;
}

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        struct radeon_device *rdev;
        struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
        unsigned i;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (gtt && gtt->userptr) {
                kfree(ttm->sg);
                ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
                return;
        }

        if (slave)
                return;

        rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
        if (rdev->flags & RADEON_IS_AGP) {
                ttm_agp_tt_unpopulate(ttm);
                return;
        }
#endif

#ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
                ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
                return;
        }
#endif

        for (i = 0; i < ttm->num_pages; i++) {
                if (gtt->ttm.dma_address[i]) {
                        pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
        }

        ttm_pool_unpopulate(ttm);
}

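/* Accessors for the userptr state attached to a TT object; see
 * radeon_verify_access() above for one user of radeon_ttm_tt_has_userptr().
 */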
int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
                              uint32_t flags)
{
        struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

        if (gtt == NULL)
                return -EINVAL;

        gtt->userptr = addr;
        gtt->usermm = current->mm;
        gtt->userflags = flags;
        return 0;
}

bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
{
        struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

        if (gtt == NULL)
                return false;

        return !!gtt->userptr;
}

bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
        struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

        if (gtt == NULL)
                return false;

        return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}

static struct ttm_bo_driver radeon_bo_driver = {
        .ttm_tt_create = &radeon_ttm_tt_create,
        .ttm_tt_populate = &radeon_ttm_tt_populate,
        .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
        .invalidate_caches = &radeon_invalidate_caches,
        .init_mem_type = &radeon_init_mem_type,
        .evict_flags = &radeon_evict_flags,
        .move = &radeon_bo_move,
        .verify_access = &radeon_verify_access,
        .move_notify = &radeon_bo_move_notify,
        .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
        .io_mem_reserve = &radeon_ttm_io_mem_reserve,
        .io_mem_free = &radeon_ttm_io_mem_free,
        .lru_tail = &ttm_bo_default_lru_tail,
        .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};

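/* Bring up TTM for this device: global references, the BO device, the VRAM
 * and GTT managers, the 256KB buffer for stolen VGA memory and the debugfs
 * files.
 */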
int radeon_ttm_init(struct radeon_device *rdev)
{
        int r;

        r = radeon_ttm_global_init(rdev);
        if (r) {
                return r;
        }
        /* No other users of the address space, so set the offset to 0 */
        r = ttm_bo_device_init(&rdev->mman.bdev,
                               rdev->mman.bo_global_ref.ref.object,
                               &radeon_bo_driver,
                               rdev->ddev->anon_inode->i_mapping,
                               DRM_FILE_PAGE_OFFSET,
                               rdev->need_dma32);
        if (r) {
                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
                return r;
        }
        rdev->mman.initialized = true;
        r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
                           rdev->mc.real_vram_size >> PAGE_SHIFT);
        if (r) {
                DRM_ERROR("Failed initializing VRAM heap.\n");
                return r;
        }
        /* Change the size here instead of the init above so only lpfn is affected */
        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

        r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_VRAM, 0, NULL,
                             NULL, &rdev->stollen_vga_memory);
        if (r) {
                return r;
        }
        r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
        if (r)
                return r;
        r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
        radeon_bo_unreserve(rdev->stollen_vga_memory);
        if (r) {
                radeon_bo_unref(&rdev->stollen_vga_memory);
                return r;
        }
        DRM_INFO("radeon: %uM of VRAM memory ready\n",
                 (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
        r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
                           rdev->mc.gtt_size >> PAGE_SHIFT);
        if (r) {
                DRM_ERROR("Failed initializing GTT heap.\n");
                return r;
        }
        DRM_INFO("radeon: %uM of GTT memory ready.\n",
                 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

        r = radeon_ttm_debugfs_init(rdev);
        if (r) {
                DRM_ERROR("Failed to init debugfs\n");
                return r;
        }
        return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
        int r;

        if (!rdev->mman.initialized)
                return;
        radeon_ttm_debugfs_fini(rdev);
        if (rdev->stollen_vga_memory) {
                r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
                if (r == 0) {
                        radeon_bo_unpin(rdev->stollen_vga_memory);
                        radeon_bo_unreserve(rdev->stollen_vga_memory);
                }
                radeon_bo_unref(&rdev->stollen_vga_memory);
        }
        ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
        ttm_bo_device_release(&rdev->mman.bdev);
        radeon_gart_fini(rdev);
        radeon_ttm_global_fini(rdev);
        rdev->mman.initialized = false;
        DRM_INFO("radeon: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
        struct ttm_mem_type_manager *man;

        if (!rdev->mman.initialized)
                return;

        man = &rdev->mman.bdev.man[TTM_PL_VRAM];
        /* this just adjusts TTM size idea, which sets lpfn to the correct value */
        man->size = size >> PAGE_SHIFT;
}

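/* CPU fault handling: radeon wraps TTM's fault handler so that faults on BO
 * mappings hold rdev->pm.mclk_lock for their duration.
 */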
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo;
        struct radeon_device *rdev;
        int r;

        bo = (struct ttm_buffer_object *)vma->vm_private_data;
        if (bo == NULL) {
                return VM_FAULT_NOPAGE;
        }
        rdev = radeon_get_rdev(bo->bdev);
        down_read(&rdev->pm.mclk_lock);
        r = ttm_vm_ops->fault(vma, vmf);
        up_read(&rdev->pm.mclk_lock);
        return r;
}

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct radeon_device *rdev;
        int r;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
                return -EINVAL;
        }

        file_priv = filp->private_data;
        rdev = file_priv->minor->dev->dev_private;
        if (rdev == NULL) {
                return -EINVAL;
        }
        r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
        if (unlikely(r != 0)) {
                return r;
        }
        if (unlikely(ttm_vm_ops == NULL)) {
                ttm_vm_ops = vma->vm_ops;
                radeon_ttm_vm_ops = *ttm_vm_ops;
                radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
        }
        vma->vm_ops = &radeon_ttm_vm_ops;
        return 0;
}

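/* debugfs support: dump the VRAM/GTT managers and expose raw "radeon_vram"
 * and "radeon_gtt" files that read VRAM through the MM_INDEX/MM_DATA window
 * and GTT through the GART page list.
 */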
#if defined(CONFIG_DEBUG_FS)

static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        unsigned ttm_pl = *(int *)node->info_ent->data;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv;
        int ret;
        struct ttm_bo_global *glob = rdev->mman.bdev.glob;

        spin_lock(&glob->lru_lock);
        ret = drm_mm_dump_table(m, mm);
        spin_unlock(&glob->lru_lock);
        return ret;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static struct drm_info_list radeon_ttm_debugfs_list[] = {
        {"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
        {"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
        {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
        {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
        struct radeon_device *rdev = inode->i_private;
        i_size_write(inode, rdev->mc.mc_vram_size);
        filep->private_data = inode->i_private;
        return 0;
}

static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
                                    size_t size, loff_t *pos)
{
        struct radeon_device *rdev = f->private_data;
        ssize_t result = 0;
        int r;

        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;

        while (size) {
                unsigned long flags;
                uint32_t value;

                if (*pos >= rdev->mc.mc_vram_size)
                        return result;

                spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
                WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
                if (rdev->family >= CHIP_CEDAR)
                        WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
                value = RREG32(RADEON_MM_DATA);
                spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

                r = put_user(value, (uint32_t *)buf);
                if (r)
                        return r;

                result += 4;
                buf += 4;
                *pos += 4;
                size -= 4;
        }

        return result;
}

static const struct file_operations radeon_ttm_vram_fops = {
        .owner = THIS_MODULE,
        .open = radeon_ttm_vram_open,
        .read = radeon_ttm_vram_read,
        .llseek = default_llseek
};

static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
{
        struct radeon_device *rdev = inode->i_private;
        i_size_write(inode, rdev->mc.gtt_size);
        filep->private_data = inode->i_private;
        return 0;
}

static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
                                   size_t size, loff_t *pos)
{
        struct radeon_device *rdev = f->private_data;
        ssize_t result = 0;
        int r;

        while (size) {
                loff_t p = *pos / PAGE_SIZE;
                unsigned off = *pos & ~PAGE_MASK;
                size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
                struct page *page;
                void *ptr;

                if (p >= rdev->gart.num_cpu_pages)
                        return result;

                page = rdev->gart.pages[p];
                if (page) {
                        ptr = kmap(page);
                        ptr += off;

                        r = copy_to_user(buf, ptr, cur_size);
                        kunmap(rdev->gart.pages[p]);
                } else
                        r = clear_user(buf, cur_size);

                if (r)
                        return -EFAULT;

                result += cur_size;
                buf += cur_size;
                *pos += cur_size;
                size -= cur_size;
        }

        return result;
}

static const struct file_operations radeon_ttm_gtt_fops = {
        .owner = THIS_MODULE,
        .open = radeon_ttm_gtt_open,
        .read = radeon_ttm_gtt_read,
        .llseek = default_llseek
};

#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        unsigned count;

        struct drm_minor *minor = rdev->ddev->primary;
        struct dentry *ent, *root = minor->debugfs_root;

        ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root,
                                  rdev, &radeon_ttm_vram_fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);
        rdev->mman.vram = ent;

        ent = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO, root,
                                  rdev, &radeon_ttm_gtt_fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);
        rdev->mman.gtt = ent;

        count = ARRAY_SIZE(radeon_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
        if (!swiotlb_nr_tbl())
                --count;
#endif

        return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
#else

        return 0;
#endif
}

static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)

        debugfs_remove(rdev->mman.vram);
        rdev->mman.vram = NULL;

        debugfs_remove(rdev->mman.gtt);
        rdev->mman.gtt = NULL;
#endif
}