/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include "radeon_reg.h"
#include "radeon.h"

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);

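/*
 * Recover the radeon_device that owns a ttm_bo_device: the bdev is
 * embedded in radeon_mman, which is itself embedded in radeon_device,
 * so two container_of() steps walk back to the owning device.
 */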
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}

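/*
 * TTM requires this hook to exist, but radeon has nothing to do here,
 * so it is a stub that always succeeds.
 */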
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

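/*
 * Describe each placement domain to TTM: which range manager backs it,
 * its base GPU offset, and the caching attributes it supports. On AGP
 * systems the GTT is the AGP aperture, so caching there is restricted
 * to uncached/write-combined.
 */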
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if IS_ENABLED(CONFIG_AGP)
		if (rdev->flags & RADEON_IS_AGP) {
			if (!rdev->ddev->agp) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

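/*
 * Pick where a buffer should go when TTM evicts it: non-radeon BOs fall
 * back to system memory; BOs in CPU-visible VRAM prefer the invisible
 * part of VRAM with GTT only as the busy fallback; everything else goes
 * to GTT or the CPU domain.
 */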
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
			 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
			unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			int i;

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
							 RADEON_GEM_DOMAIN_GTT);
			rbo->placement.num_busy_placement = 0;
			for (i = 0; i < rbo->placement.num_placement; i++) {
				if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
					if (rbo->placements[i].fpfn < fpfn)
						rbo->placements[i].fpfn = fpfn;
				} else {
					rbo->placement.busy_placement =
						&rbo->placements[i];
					rbo->placement.num_busy_placement = 1;
				}
			}
		} else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

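/*
 * mmap permission check: userptr BOs must never be CPU-mapped through
 * the driver; everything else defers to drm_vma_node_verify_access().
 */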
static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);

	if (radeon_ttm_tt_has_userptr(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&rbo->gem_base.vma_node,
					  filp->private_data);
}

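/*
 * Null move: nothing needs copying, so the BO simply adopts the new
 * placement; the BUG_ON documents that the old placement must not hold
 * a memory node of its own.
 */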
static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

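/*
 * Copy a BO with the GPU: translate both placements to GPU addresses,
 * emit an asynchronous copy on the copy ring, and hand the resulting
 * fence to TTM so the old memory is freed only once the copy completes.
 */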
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem,
			struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	unsigned num_pages;
	int r, ridx;

	rdev = radeon_get_rdev(bo->bdev);
	ridx = radeon_copy_ring_index(rdev);
	old_start = (u64)old_mem->start << PAGE_SHIFT;
	new_start = (u64)new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[ridx].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

	num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

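/*
 * VRAM -> system moves happen in two hops: blit into a temporary GTT
 * placement the GPU can reach, then let TTM move the now GTT-bound
 * pages out to system memory.
 */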
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem, &ctx);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, &ctx, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

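/*
 * System -> VRAM is the mirror image: move the pages into a temporary
 * GTT placement first, then blit from GTT into the final VRAM location.
 */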
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, &ctx, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

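/*
 * Top-level move callback: waits for idle, refuses to move pinned BOs,
 * short-circuits moves that only need a (re)bind, uses the GPU blit
 * paths when a copy ring is available, and falls back to a CPU memcpy.
 */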
static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (r)
		return r;

	/* Can't move a pinned BO */
	rbo = container_of(bo, struct radeon_bo, tbo);
	if (WARN_ON_ONCE(rbo->pin_count > 0))
		return -EINVAL;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
	    rdev->asic->copy.copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, ctx->interruptible,
					ctx->no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, ctx->interruptible,
					ctx->no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r) {
			return r;
		}
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
	return 0;
}

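/*
 * Supply the bus address info TTM needs to CPU-map a placement: nothing
 * for system RAM, the AGP aperture for GTT on AGP systems, and the PCI
 * aperture (bounds-checked against visible VRAM) for VRAM.
 */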
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap_nocache(mem->bus.base + mem->bus.offset,
						mem->bus.size);
		if (!mem->bus.addr)
			return -ENOMEM;

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_dma_tt		ttm;
	struct radeon_device		*rdev;
	u64				offset;

	uint64_t			userptr;
	struct mm_struct		*usermm;
	uint32_t			userflags;
};

/* prepare the sg table with the user pages */
static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned pinned = 0, nents;
	int r;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	if (current->mm != gtt->usermm)
		return -EPERM;

	if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
		/* check that we only pin down anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;
		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end)
			return -EPERM;
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **pages = ttm->pages + pinned;

		r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
				   pages, NULL);
		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);

release_pages:
	release_pages(ttm->pages, pinned);
	return r;
}

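/*
 * Undo radeon_ttm_tt_pin_userptr(): DMA-unmap the sg table, mark pages
 * dirty if the GPU may have written them, and drop the references taken
 * by get_user_pages().
 */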
static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct sg_page_iter sg_iter;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}

	sg_free_table(ttm->sg);
}

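/*
 * Bind a ttm_tt into the GART at its placement offset. Userptr pages
 * are pinned here and mapped without GART write permission; cached
 * pages get the SNOOP bit so GPU accesses snoop the CPU caches.
 */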
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_tt *gtt = (void*)ttm;
	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
		RADEON_GART_PAGE_WRITE;
	int r;

	if (gtt->userptr) {
		r = radeon_ttm_tt_pin_userptr(ttm);
		if (r)
			return r;
		flags &= ~RADEON_GART_PAGE_WRITE;
	}

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	if (ttm->caching_state == tt_cached)
		flags |= RADEON_GART_PAGE_SNOOP;
	r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);

	if (gtt->userptr)
		radeon_ttm_tt_unpin_userptr(ttm);

	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

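/*
 * Allocate the per-BO TTM backend: AGP systems reuse TTM's AGP backend,
 * everything else gets a radeon_ttm_tt with a DMA address array.
 */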
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt;

	rdev = radeon_get_rdev(bo->bdev);
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bo, rdev->ddev->agp->bridge,
					 page_flags);
	}
#endif

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &radeon_backend_func;
	gtt->rdev = rdev;
	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

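/* Downcast to radeon_ttm_tt; returns NULL for AGP-backed (or missing) TTs. */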
static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
{
	if (!ttm || ttm->func != &radeon_backend_func)
		return NULL;
	return (struct radeon_ttm_tt *)ttm;
}

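/*
 * Provide backing pages for a ttm_tt: userptr and dma-buf imports bring
 * their own pages, AGP delegates to TTM's AGP pool, swiotlb-constrained
 * systems use the coherent DMA pool, and the common case populates and
 * DMA-maps pages from the normal TTM pool.
 */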
static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
	struct radeon_device *rdev;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm, ctx);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
	}
#endif

	return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
}

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
		return;
	}
#endif

	ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm);
}

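/*
 * Record the userptr address, owning mm and flags on the TT object;
 * the pages themselves are only pinned later, at bind time.
 */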
int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	return 0;
}

bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return false;

	return !!gtt->userptr;
}

bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
	.io_mem_free = &radeon_ttm_io_mem_free,
};

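/*
 * Bring up TTM for the device: register the bo_device, size the VRAM
 * and GTT heaps from the memory controller, pin the small stolen-VGA
 * buffer in VRAM, and register the debugfs files.
 */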
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	/* No other users of the address space, so set it to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       &radeon_bo_driver,
			       rdev->ddev->anon_inode->i_mapping,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
			   rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	/* Change the size here instead of the init above so only lpfn is affected */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &rdev->stolen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stolen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stolen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stolen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
			   rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	radeon_ttm_debugfs_fini(rdev);
	if (rdev->stolen_vga_memory) {
		r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stolen_vga_memory);
			radeon_bo_unreserve(rdev->stolen_vga_memory);
		}
		radeon_bo_unref(&rdev->stolen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!rdev->mman.initialized)
		return;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

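/*
 * Page faults are routed through this wrapper so the fault runs under
 * the read side of the memory-clock lock, keeping reclocking from
 * racing with VRAM access. radeon_mmap() installs it on first use by
 * copying TTM's vm_ops and patching the .fault hook.
 */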
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct radeon_device *rdev;
	vm_fault_t ret;

	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	rdev = radeon_get_rdev(bo->bdev);
	down_read(&rdev->pm.mclk_lock);
	ret = ttm_vm_ops->fault(vmf);
	up_read(&rdev->pm.mclk_lock);
	return ret;
}

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int r;
	struct drm_file *file_priv = filp->private_data;
	struct radeon_device *rdev = file_priv->minor->dev->dev_private;

	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int*)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct ttm_mem_type_manager *man = &rdev->mman.bdev.man[ttm_pl];
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static struct drm_info_list radeon_ttm_debugfs_list[] = {
	{"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
	{"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

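/*
 * debugfs backends for the "radeon_vram" and "radeon_gtt" files: each
 * open sizes the inode so the file length matches the aperture, and
 * reads stream out raw VRAM (via the MM index/data registers) or GART
 * page contents.
 */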
static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.mc_vram_size);
	filep->private_data = inode->i_private;
	return 0;
}

static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= rdev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
		WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
		if (rdev->family >= CHIP_CEDAR)
			WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
		value = RREG32(RADEON_MM_DATA);
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations radeon_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_vram_open,
	.read = radeon_ttm_vram_read,
	.llseek = default_llseek
};

static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.gtt_size);
	filep->private_data = inode->i_private;
	return 0;
}

static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= rdev->gart.num_cpu_pages)
			return result;

		page = rdev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(rdev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations radeon_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_gtt_open,
	.read = radeon_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = rdev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root,
				  rdev, &radeon_ttm_vram_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	rdev->mman.vram = ent;

	ent = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO, root,
				  rdev, &radeon_ttm_gtt_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	rdev->mman.gtt = ent;

	count = ARRAY_SIZE(radeon_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!(rdev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
#else

	return 0;
#endif
}

static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)

	debugfs_remove(rdev->mman.vram);
	rdev->mman.vram = NULL;

	debugfs_remove(rdev->mman.gtt);
	rdev->mman.gtt = NULL;
#endif
}