Commit | Line | Data |
---|---|---|
771fe6b9 JG |
1 | /* |
2 | * Copyright 2009 Jerome Glisse. | |
3 | * All Rights Reserved. | |
4 | * | |
5 | * Permission is hereby granted, free of charge, to any person obtaining a | |
6 | * copy of this software and associated documentation files (the | |
7 | * "Software"), to deal in the Software without restriction, including | |
8 | * without limitation the rights to use, copy, modify, merge, publish, | |
9 | * distribute, sub license, and/or sell copies of the Software, and to | |
10 | * permit persons to whom the Software is furnished to do so, subject to | |
11 | * the following conditions: | |
12 | * | |
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |
20 | * | |
21 | * The above copyright notice and this permission notice (including the | |
22 | * next paragraph) shall be included in all copies or substantial portions | |
23 | * of the Software. | |
24 | * | |
25 | */ | |
26 | /* | |
27 | * Authors: | |
28 | * Jerome Glisse <glisse@freedesktop.org> | |
29 | * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> | |
30 | * Dave Airlie | |
31 | */ | |
32 | #include <ttm/ttm_bo_api.h> | |
33 | #include <ttm/ttm_bo_driver.h> | |
34 | #include <ttm/ttm_placement.h> | |
35 | #include <ttm/ttm_module.h> | |
8d7cddcd | 36 | #include <ttm/ttm_page_alloc.h> |
771fe6b9 JG |
37 | #include <drm/drmP.h> |
38 | #include <drm/radeon_drm.h> | |
fa8a1238 | 39 | #include <linux/seq_file.h> |
5a0e3ad6 | 40 | #include <linux/slab.h> |
771fe6b9 JG |
41 | #include "radeon_reg.h" |
42 | #include "radeon.h" | |
43 | ||
44 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) | |
45 | ||
fa8a1238 DA |
46 | static int radeon_ttm_debugfs_init(struct radeon_device *rdev); |
47 | ||
771fe6b9 JG |
48 | static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) |
49 | { | |
50 | struct radeon_mman *mman; | |
51 | struct radeon_device *rdev; | |
52 | ||
53 | mman = container_of(bdev, struct radeon_mman, bdev); | |
54 | rdev = container_of(mman, struct radeon_device, mman); | |
55 | return rdev; | |
56 | } | |
57 | ||
58 | ||
59 | /* | |
60 | * Global memory. | |
61 | */ | |
ba4420c2 | 62 | static int radeon_ttm_mem_global_init(struct drm_global_reference *ref) |
771fe6b9 JG |
63 | { |
64 | return ttm_mem_global_init(ref->object); | |
65 | } | |
66 | ||
ba4420c2 | 67 | static void radeon_ttm_mem_global_release(struct drm_global_reference *ref) |
771fe6b9 JG |
68 | { |
69 | ttm_mem_global_release(ref->object); | |
70 | } | |
71 | ||
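/* Take references on the driver-global TTM memory-accounting and BO objects shared across DRM drivers. */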
72 | static int radeon_ttm_global_init(struct radeon_device *rdev) | |
73 | { | |
ba4420c2 | 74 | struct drm_global_reference *global_ref; |
771fe6b9 JG |
75 | int r; |
76 | ||
77 | rdev->mman.mem_global_referenced = false; | |
78 | global_ref = &rdev->mman.mem_global_ref; | |
ba4420c2 | 79 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; |
771fe6b9 JG |
80 | global_ref->size = sizeof(struct ttm_mem_global); |
81 | global_ref->init = &radeon_ttm_mem_global_init; | |
82 | global_ref->release = &radeon_ttm_mem_global_release; | |
ba4420c2 | 83 | r = drm_global_item_ref(global_ref); |
771fe6b9 | 84 | if (r != 0) { |
a987fcaa TH |
85 | DRM_ERROR("Failed setting up TTM memory accounting " |
86 | "subsystem.\n"); | |
771fe6b9 JG |
87 | return r; |
88 | } | |
a987fcaa TH |
89 | |
90 | rdev->mman.bo_global_ref.mem_glob = | |
91 | rdev->mman.mem_global_ref.object; | |
92 | global_ref = &rdev->mman.bo_global_ref.ref; | |
ba4420c2 | 93 | global_ref->global_type = DRM_GLOBAL_TTM_BO; |
7f5f4db2 | 94 | global_ref->size = sizeof(struct ttm_bo_global); |
a987fcaa TH |
95 | global_ref->init = &ttm_bo_global_init; |
96 | global_ref->release = &ttm_bo_global_release; | |
ba4420c2 | 97 | r = drm_global_item_ref(global_ref); |
a987fcaa TH |
98 | if (r != 0) { |
99 | DRM_ERROR("Failed setting up TTM BO subsystem.\n"); | |
ba4420c2 | 100 | drm_global_item_unref(&rdev->mman.mem_global_ref); |
a987fcaa TH |
101 | return r; |
102 | } | |
103 | ||
771fe6b9 JG |
104 | rdev->mman.mem_global_referenced = true; |
105 | return 0; | |
106 | } | |
107 | ||
108 | static void radeon_ttm_global_fini(struct radeon_device *rdev) | |
109 | { | |
110 | if (rdev->mman.mem_global_referenced) { | |
ba4420c2 DA |
111 | drm_global_item_unref(&rdev->mman.bo_global_ref.ref); |
112 | drm_global_item_unref(&rdev->mman.mem_global_ref); | |
771fe6b9 JG |
113 | rdev->mman.mem_global_referenced = false; |
114 | } | |
115 | } | |
116 | ||
771fe6b9 JG |
117 | static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) |
118 | { | |
119 | return 0; | |
120 | } | |
121 | ||
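/* TTM callback describing each memory domain: SYSTEM (cached pages), TT/GTT (optionally the AGP aperture) and on-board VRAM. */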
122 | static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |
123 | struct ttm_mem_type_manager *man) | |
124 | { | |
125 | struct radeon_device *rdev; | |
126 | ||
127 | rdev = radeon_get_rdev(bdev); | |
128 | ||
129 | switch (type) { | |
130 | case TTM_PL_SYSTEM: | |
131 | /* System memory */ | |
132 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; | |
133 | man->available_caching = TTM_PL_MASK_CACHING; | |
134 | man->default_caching = TTM_PL_FLAG_CACHED; | |
135 | break; | |
136 | case TTM_PL_TT: | |
d961db75 | 137 | man->func = &ttm_bo_manager_func; |
d594e46a | 138 | man->gpu_offset = rdev->mc.gtt_start; |
771fe6b9 JG |
139 | man->available_caching = TTM_PL_MASK_CACHING; |
140 | man->default_caching = TTM_PL_FLAG_CACHED; | |
55c93278 | 141 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; |
771fe6b9 JG |
142 | #if __OS_HAS_AGP |
143 | if (rdev->flags & RADEON_IS_AGP) { | |
144 | if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) { | |
145 | DRM_ERROR("AGP is not enabled for memory type %u\n", | |
146 | (unsigned)type); | |
147 | return -EINVAL; | |
148 | } | |
55c93278 | 149 | if (!rdev->ddev->agp->cant_use_aperture) |
0a2d50e3 | 150 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; |
771fe6b9 JG |
151 | man->available_caching = TTM_PL_FLAG_UNCACHED | |
152 | TTM_PL_FLAG_WC; | |
153 | man->default_caching = TTM_PL_FLAG_WC; | |
771fe6b9 | 154 | } |
0c321c79 | 155 | #endif |
771fe6b9 JG |
156 | break; |
157 | case TTM_PL_VRAM: | |
158 | /* "On-card" video ram */ | |
d961db75 | 159 | man->func = &ttm_bo_manager_func; |
d594e46a | 160 | man->gpu_offset = rdev->mc.vram_start; |
771fe6b9 | 161 | man->flags = TTM_MEMTYPE_FLAG_FIXED | |
771fe6b9 JG |
162 | TTM_MEMTYPE_FLAG_MAPPABLE; |
163 | man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; | |
164 | man->default_caching = TTM_PL_FLAG_WC; | |
771fe6b9 JG |
165 | break; |
166 | default: | |
167 | DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); | |
168 | return -EINVAL; | |
169 | } | |
170 | return 0; | |
171 | } | |
172 | ||
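/* Eviction placement: VRAM buffers go to GTT (or CPU while the GFX ring is down), everything else to CPU; non-radeon BOs get plain cached system memory. */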
312ea8da JG |
173 | static void radeon_evict_flags(struct ttm_buffer_object *bo, |
174 | struct ttm_placement *placement) | |
771fe6b9 | 175 | { |
d03d8589 JG |
176 | struct radeon_bo *rbo; |
177 | static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; | |
178 | ||
179 | if (!radeon_ttm_bo_is_radeon_bo(bo)) { | |
180 | placement->fpfn = 0; | |
181 | placement->lpfn = 0; | |
182 | placement->placement = &placements; | |
183 | placement->busy_placement = &placements; | |
184 | placement->num_placement = 1; | |
185 | placement->num_busy_placement = 1; | |
186 | return; | |
187 | } | |
188 | rbo = container_of(bo, struct radeon_bo, tbo); | |
771fe6b9 | 189 | switch (bo->mem.mem_type) { |
312ea8da | 190 | case TTM_PL_VRAM: |
e32eb50d | 191 | if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false) |
9270eb1b DA |
192 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); |
193 | else | |
194 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); | |
312ea8da JG |
195 | break; |
196 | case TTM_PL_TT: | |
771fe6b9 | 197 | default: |
312ea8da | 198 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); |
771fe6b9 | 199 | } |
eaa5fd1a | 200 | *placement = rbo->placement; |
771fe6b9 JG |
201 | } |
202 | ||
203 | static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) | |
204 | { | |
205 | return 0; | |
206 | } | |
207 | ||
208 | static void radeon_move_null(struct ttm_buffer_object *bo, | |
209 | struct ttm_mem_reg *new_mem) | |
210 | { | |
211 | struct ttm_mem_reg *old_mem = &bo->mem; | |
212 | ||
213 | BUG_ON(old_mem->mm_node != NULL); | |
214 | *old_mem = *new_mem; | |
215 | new_mem->mm_node = NULL; | |
216 | } | |
217 | ||
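/* Copy a buffer between domains with the GPU copy ring, signalling the other rings via semaphores and fencing the transfer. */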
218 | static int radeon_move_blit(struct ttm_buffer_object *bo, | |
9d87fa21 JG |
219 | bool evict, int no_wait_reserve, bool no_wait_gpu, |
220 | struct ttm_mem_reg *new_mem, | |
221 | struct ttm_mem_reg *old_mem) | |
771fe6b9 JG |
222 | { |
223 | struct radeon_device *rdev; | |
224 | uint64_t old_start, new_start; | |
225 | struct radeon_fence *fence; | |
3000bf39 | 226 | int r, i; |
771fe6b9 JG |
227 | |
228 | rdev = radeon_get_rdev(bo->bdev); | |
27cd7769 | 229 | r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev)); |
771fe6b9 JG |
230 | if (unlikely(r)) { |
231 | return r; | |
232 | } | |
d961db75 BS |
233 | old_start = old_mem->start << PAGE_SHIFT; |
234 | new_start = new_mem->start << PAGE_SHIFT; | |
771fe6b9 JG |
235 | |
236 | switch (old_mem->mem_type) { | |
237 | case TTM_PL_VRAM: | |
d594e46a | 238 | old_start += rdev->mc.vram_start; |
771fe6b9 JG |
239 | break; |
240 | case TTM_PL_TT: | |
d594e46a | 241 | old_start += rdev->mc.gtt_start; |
771fe6b9 JG |
242 | break; |
243 | default: | |
244 | DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); | |
245 | return -EINVAL; | |
246 | } | |
247 | switch (new_mem->mem_type) { | |
248 | case TTM_PL_VRAM: | |
d594e46a | 249 | new_start += rdev->mc.vram_start; |
771fe6b9 JG |
250 | break; |
251 | case TTM_PL_TT: | |
d594e46a | 252 | new_start += rdev->mc.gtt_start; |
771fe6b9 JG |
253 | break; |
254 | default: | |
255 | DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); | |
256 | return -EINVAL; | |
257 | } | |
27cd7769 | 258 | if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) { |
3000bf39 | 259 | DRM_ERROR("Trying to move memory with ring turned off.\n"); |
771fe6b9 JG |
260 | return -EINVAL; |
261 | } | |
003cefe0 AD |
262 | |
263 | BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); | |
264 | ||
3000bf39 AD |
265 | /* sync other rings */ |
266 | if (rdev->family >= CHIP_R600) { | |
267 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { | |
268 | /* no need to sync to our own or unused rings */ | |
27cd7769 | 269 | if (i == radeon_copy_ring_index(rdev) || !rdev->ring[i].ready) |
3000bf39 AD |
270 | continue; |
271 | ||
272 | if (!fence->semaphore) { | |
273 | r = radeon_semaphore_create(rdev, &fence->semaphore); | |
274 | /* FIXME: handle semaphore error */ | |
275 | if (r) | |
276 | continue; | |
277 | } | |
278 | ||
279 | r = radeon_ring_lock(rdev, &rdev->ring[i], 3); | |
280 | /* FIXME: handle ring lock error */ | |
281 | if (r) | |
282 | continue; | |
283 | radeon_semaphore_emit_signal(rdev, i, fence->semaphore); | |
284 | radeon_ring_unlock_commit(rdev, &rdev->ring[i]); | |
285 | ||
27cd7769 | 286 | r = radeon_ring_lock(rdev, &rdev->ring[radeon_copy_ring_index(rdev)], 3); |
3000bf39 AD |
287 | /* FIXME: handle ring lock error */ |
288 | if (r) | |
289 | continue; | |
27cd7769 AD |
290 | radeon_semaphore_emit_wait(rdev, radeon_copy_ring_index(rdev), fence->semaphore); |
291 | radeon_ring_unlock_commit(rdev, &rdev->ring[radeon_copy_ring_index(rdev)]); | |
3000bf39 AD |
292 | } |
293 | } | |
294 | ||
003cefe0 AD |
295 | r = radeon_copy(rdev, old_start, new_start, |
296 | new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ | |
297 | fence); | |
771fe6b9 JG |
298 | /* FIXME: handle copy error */ |
299 | r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, | |
9d87fa21 | 300 | evict, no_wait_reserve, no_wait_gpu, new_mem); |
771fe6b9 JG |
301 | radeon_fence_unref(&fence); |
302 | return r; | |
303 | } | |
304 | ||
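/* VRAM -> system move in two steps: blit into a temporary GTT placement, then let TTM migrate the bound pages to system memory. */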
305 | static int radeon_move_vram_ram(struct ttm_buffer_object *bo, | |
9d87fa21 JG |
306 | bool evict, bool interruptible, |
307 | bool no_wait_reserve, bool no_wait_gpu, | |
771fe6b9 JG |
308 | struct ttm_mem_reg *new_mem) |
309 | { | |
310 | struct radeon_device *rdev; | |
311 | struct ttm_mem_reg *old_mem = &bo->mem; | |
312 | struct ttm_mem_reg tmp_mem; | |
312ea8da JG |
313 | u32 placements; |
314 | struct ttm_placement placement; | |
771fe6b9 JG |
315 | int r; |
316 | ||
317 | rdev = radeon_get_rdev(bo->bdev); | |
318 | tmp_mem = *new_mem; | |
319 | tmp_mem.mm_node = NULL; | |
312ea8da JG |
320 | placement.fpfn = 0; |
321 | placement.lpfn = 0; | |
322 | placement.num_placement = 1; | |
323 | placement.placement = &placements; | |
324 | placement.num_busy_placement = 1; | |
325 | placement.busy_placement = &placements; | |
326 | placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; | |
327 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, | |
9d87fa21 | 328 | interruptible, no_wait_reserve, no_wait_gpu); |
771fe6b9 JG |
329 | if (unlikely(r)) { |
330 | return r; | |
331 | } | |
df67bed9 DA |
332 | |
333 | r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); | |
334 | if (unlikely(r)) { | |
335 | goto out_cleanup; | |
336 | } | |
337 | ||
771fe6b9 JG |
338 | r = ttm_tt_bind(bo->ttm, &tmp_mem); |
339 | if (unlikely(r)) { | |
340 | goto out_cleanup; | |
341 | } | |
9d87fa21 | 342 | r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem); |
771fe6b9 JG |
343 | if (unlikely(r)) { |
344 | goto out_cleanup; | |
345 | } | |
9d87fa21 | 346 | r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); |
771fe6b9 | 347 | out_cleanup: |
42311ff9 | 348 | ttm_bo_mem_put(bo, &tmp_mem); |
771fe6b9 JG |
349 | return r; |
350 | } | |
351 | ||
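/* System -> VRAM move in two steps: bind the pages into a temporary GTT placement, then blit them into VRAM. */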
352 | static int radeon_move_ram_vram(struct ttm_buffer_object *bo, | |
9d87fa21 JG |
353 | bool evict, bool interruptible, |
354 | bool no_wait_reserve, bool no_wait_gpu, | |
771fe6b9 JG |
355 | struct ttm_mem_reg *new_mem) |
356 | { | |
357 | struct radeon_device *rdev; | |
358 | struct ttm_mem_reg *old_mem = &bo->mem; | |
359 | struct ttm_mem_reg tmp_mem; | |
312ea8da JG |
360 | struct ttm_placement placement; |
361 | u32 placements; | |
771fe6b9 JG |
362 | int r; |
363 | ||
364 | rdev = radeon_get_rdev(bo->bdev); | |
365 | tmp_mem = *new_mem; | |
366 | tmp_mem.mm_node = NULL; | |
312ea8da JG |
367 | placement.fpfn = 0; |
368 | placement.lpfn = 0; | |
369 | placement.num_placement = 1; | |
370 | placement.placement = &placements; | |
371 | placement.num_busy_placement = 1; | |
372 | placement.busy_placement = &placements; | |
373 | placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; | |
9d87fa21 | 374 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu); |
771fe6b9 JG |
375 | if (unlikely(r)) { |
376 | return r; | |
377 | } | |
9d87fa21 | 378 | r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); |
771fe6b9 JG |
379 | if (unlikely(r)) { |
380 | goto out_cleanup; | |
381 | } | |
9d87fa21 | 382 | r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem); |
771fe6b9 JG |
383 | if (unlikely(r)) { |
384 | goto out_cleanup; | |
385 | } | |
386 | out_cleanup: | |
42311ff9 | 387 | ttm_bo_mem_put(bo, &tmp_mem); |
771fe6b9 JG |
388 | return r; |
389 | } | |
390 | ||
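/* Top-level move hook: trivial system/GTT transitions need no copy, GPU blits are used when a copy ring is ready, and memcpy is the fallback. */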
391 | static int radeon_bo_move(struct ttm_buffer_object *bo, | |
9d87fa21 JG |
392 | bool evict, bool interruptible, |
393 | bool no_wait_reserve, bool no_wait_gpu, | |
394 | struct ttm_mem_reg *new_mem) | |
771fe6b9 JG |
395 | { |
396 | struct radeon_device *rdev; | |
397 | struct ttm_mem_reg *old_mem = &bo->mem; | |
398 | int r; | |
399 | ||
400 | rdev = radeon_get_rdev(bo->bdev); | |
401 | if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { | |
402 | radeon_move_null(bo, new_mem); | |
403 | return 0; | |
404 | } | |
405 | if ((old_mem->mem_type == TTM_PL_TT && | |
406 | new_mem->mem_type == TTM_PL_SYSTEM) || | |
407 | (old_mem->mem_type == TTM_PL_SYSTEM && | |
408 | new_mem->mem_type == TTM_PL_TT)) { | |
af901ca1 | 409 | /* bind is enough */ |
771fe6b9 JG |
410 | radeon_move_null(bo, new_mem); |
411 | return 0; | |
412 | } | |
27cd7769 AD |
413 | if (!rdev->ring[radeon_copy_ring_index(rdev)].ready || |
414 | rdev->asic->copy.copy == NULL) { | |
771fe6b9 | 415 | /* use memcpy */ |
1ab2e105 | 416 | goto memcpy; |
771fe6b9 JG |
417 | } |
418 | ||
419 | if (old_mem->mem_type == TTM_PL_VRAM && | |
420 | new_mem->mem_type == TTM_PL_SYSTEM) { | |
1ab2e105 | 421 | r = radeon_move_vram_ram(bo, evict, interruptible, |
9d87fa21 | 422 | no_wait_reserve, no_wait_gpu, new_mem); |
771fe6b9 JG |
423 | } else if (old_mem->mem_type == TTM_PL_SYSTEM && |
424 | new_mem->mem_type == TTM_PL_VRAM) { | |
1ab2e105 | 425 | r = radeon_move_ram_vram(bo, evict, interruptible, |
9d87fa21 | 426 | no_wait_reserve, no_wait_gpu, new_mem); |
771fe6b9 | 427 | } else { |
9d87fa21 | 428 | r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem); |
771fe6b9 | 429 | } |
1ab2e105 MD |
430 | |
431 | if (r) { | |
432 | memcpy: | |
9d87fa21 | 433 | r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); |
1ab2e105 | 434 | } |
771fe6b9 JG |
435 | return r; |
436 | } | |
437 | ||
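/* Tell TTM how to map a placement for CPU access: nothing for system memory, the AGP aperture for GTT when applicable, the PCI aperture for visible VRAM. */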
0a2d50e3 JG |
438 | static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
439 | { | |
440 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | |
441 | struct radeon_device *rdev = radeon_get_rdev(bdev); | |
442 | ||
443 | mem->bus.addr = NULL; | |
444 | mem->bus.offset = 0; | |
445 | mem->bus.size = mem->num_pages << PAGE_SHIFT; | |
446 | mem->bus.base = 0; | |
447 | mem->bus.is_iomem = false; | |
448 | if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) | |
449 | return -EINVAL; | |
450 | switch (mem->mem_type) { | |
451 | case TTM_PL_SYSTEM: | |
452 | /* system memory */ | |
453 | return 0; | |
454 | case TTM_PL_TT: | |
455 | #if __OS_HAS_AGP | |
456 | if (rdev->flags & RADEON_IS_AGP) { | |
457 | /* RADEON_IS_AGP is set only if AGP is active */ | |
d961db75 | 458 | mem->bus.offset = mem->start << PAGE_SHIFT; |
0a2d50e3 | 459 | mem->bus.base = rdev->mc.agp_base; |
365048ff | 460 | mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture; |
0a2d50e3 JG |
461 | } |
462 | #endif | |
463 | break; | |
464 | case TTM_PL_VRAM: | |
d961db75 | 465 | mem->bus.offset = mem->start << PAGE_SHIFT; |
0a2d50e3 JG |
466 | /* check if it's visible */ |
467 | if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size) | |
468 | return -EINVAL; | |
469 | mem->bus.base = rdev->mc.aper_base; | |
470 | mem->bus.is_iomem = true; | |
ffb57c4b JE |
471 | #ifdef __alpha__ |
472 | /* | |
473 | * Alpha: use bus.addr to hold the ioremap() return, | |
474 | * so we can modify bus.base below. | |
475 | */ | |
476 | if (mem->placement & TTM_PL_FLAG_WC) | |
477 | mem->bus.addr = | |
478 | ioremap_wc(mem->bus.base + mem->bus.offset, | |
479 | mem->bus.size); | |
480 | else | |
481 | mem->bus.addr = | |
482 | ioremap_nocache(mem->bus.base + mem->bus.offset, | |
483 | mem->bus.size); | |
484 | ||
485 | /* | |
486 | * Alpha: Use just the bus offset plus | |
487 | * the hose/domain memory base for bus.base. | |
488 | * It then can be used to build PTEs for VRAM | |
489 | * access, as done in ttm_bo_vm_fault(). | |
490 | */ | |
491 | mem->bus.base = (mem->bus.base & 0x0ffffffffUL) + | |
492 | rdev->ddev->hose->dense_mem_base; | |
493 | #endif | |
0a2d50e3 JG |
494 | break; |
495 | default: | |
496 | return -EINVAL; | |
497 | } | |
498 | return 0; | |
499 | } | |
500 | ||
501 | static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | |
502 | { | |
503 | } | |
504 | ||
771fe6b9 JG |
505 | static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg, |
506 | bool lazy, bool interruptible) | |
507 | { | |
508 | return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible); | |
509 | } | |
510 | ||
511 | static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg) | |
512 | { | |
513 | return 0; | |
514 | } | |
515 | ||
516 | static void radeon_sync_obj_unref(void **sync_obj) | |
517 | { | |
518 | radeon_fence_unref((struct radeon_fence **)sync_obj); | |
519 | } | |
520 | ||
521 | static void *radeon_sync_obj_ref(void *sync_obj) | |
522 | { | |
523 | return radeon_fence_ref((struct radeon_fence *)sync_obj); | |
524 | } | |
525 | ||
526 | static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg) | |
527 | { | |
528 | return radeon_fence_signaled((struct radeon_fence *)sync_obj); | |
529 | } | |
530 | ||
649bf3ca JG |
531 | /* |
532 | * TTM backend functions. | |
533 | */ | |
534 | struct radeon_ttm_tt { | |
8e7e7052 | 535 | struct ttm_dma_tt ttm; |
649bf3ca JG |
536 | struct radeon_device *rdev; |
537 | u64 offset; | |
538 | }; | |
539 | ||
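/* Bind the TT pages into the GPU GART at the buffer's GTT offset. */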
540 | static int radeon_ttm_backend_bind(struct ttm_tt *ttm, | |
541 | struct ttm_mem_reg *bo_mem) | |
542 | { | |
8e7e7052 | 543 | struct radeon_ttm_tt *gtt = (void*)ttm; |
649bf3ca JG |
544 | int r; |
545 | ||
649bf3ca JG |
546 | gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); |
547 | if (!ttm->num_pages) { | |
548 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", | |
549 | ttm->num_pages, bo_mem, ttm); | |
550 | } | |
551 | r = radeon_gart_bind(gtt->rdev, gtt->offset, | |
8e7e7052 | 552 | ttm->num_pages, ttm->pages, gtt->ttm.dma_address); |
649bf3ca JG |
553 | if (r) { |
554 | DRM_ERROR("failed to bind %lu pages at 0x%08X\n", | |
555 | ttm->num_pages, (unsigned)gtt->offset); | |
556 | return r; | |
557 | } | |
558 | return 0; | |
559 | } | |
560 | ||
561 | static int radeon_ttm_backend_unbind(struct ttm_tt *ttm) | |
562 | { | |
8e7e7052 | 563 | struct radeon_ttm_tt *gtt = (void *)ttm; |
649bf3ca | 564 | |
649bf3ca JG |
565 | radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages); |
566 | return 0; | |
567 | } | |
568 | ||
569 | static void radeon_ttm_backend_destroy(struct ttm_tt *ttm) | |
570 | { | |
8e7e7052 | 571 | struct radeon_ttm_tt *gtt = (void *)ttm; |
649bf3ca | 572 | |
8e7e7052 | 573 | ttm_dma_tt_fini(&gtt->ttm); |
649bf3ca JG |
574 | kfree(gtt); |
575 | } | |
576 | ||
577 | static struct ttm_backend_func radeon_backend_func = { | |
578 | .bind = &radeon_ttm_backend_bind, | |
579 | .unbind = &radeon_ttm_backend_unbind, | |
580 | .destroy = &radeon_ttm_backend_destroy, | |
581 | }; | |
582 | ||
583 | struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev, | |
584 | unsigned long size, uint32_t page_flags, | |
585 | struct page *dummy_read_page) | |
586 | { | |
587 | struct radeon_device *rdev; | |
588 | struct radeon_ttm_tt *gtt; | |
589 | ||
590 | rdev = radeon_get_rdev(bdev); | |
591 | #if __OS_HAS_AGP | |
592 | if (rdev->flags & RADEON_IS_AGP) { | |
593 | return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge, | |
594 | size, page_flags, dummy_read_page); | |
595 | } | |
596 | #endif | |
597 | ||
598 | gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL); | |
599 | if (gtt == NULL) { | |
600 | return NULL; | |
601 | } | |
8e7e7052 | 602 | gtt->ttm.ttm.func = &radeon_backend_func; |
649bf3ca | 603 | gtt->rdev = rdev; |
8e7e7052 JG |
604 | if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) { |
605 | kfree(gtt); | |
649bf3ca JG |
606 | return NULL; |
607 | } | |
8e7e7052 | 608 | return &gtt->ttm.ttm; |
649bf3ca JG |
609 | } |
610 | ||
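/* Allocate and DMA-map backing pages: AGP pages for AGP boards, coherent DMA pages when swiotlb is active, otherwise pool pages mapped with pci_map_page(). */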
c52494f6 KRW |
611 | static int radeon_ttm_tt_populate(struct ttm_tt *ttm) |
612 | { | |
613 | struct radeon_device *rdev; | |
8e7e7052 | 614 | struct radeon_ttm_tt *gtt = (void *)ttm; |
c52494f6 KRW |
615 | unsigned i; |
616 | int r; | |
617 | ||
618 | if (ttm->state != tt_unpopulated) | |
619 | return 0; | |
620 | ||
621 | rdev = radeon_get_rdev(ttm->bdev); | |
dea7e0ac JG |
622 | #if __OS_HAS_AGP |
623 | if (rdev->flags & RADEON_IS_AGP) { | |
624 | return ttm_agp_tt_populate(ttm); | |
625 | } | |
626 | #endif | |
c52494f6 KRW |
627 | |
628 | #ifdef CONFIG_SWIOTLB | |
629 | if (swiotlb_nr_tbl()) { | |
8e7e7052 | 630 | return ttm_dma_populate(&gtt->ttm, rdev->dev); |
c52494f6 KRW |
631 | } |
632 | #endif | |
633 | ||
634 | r = ttm_pool_populate(ttm); | |
635 | if (r) { | |
636 | return r; | |
637 | } | |
638 | ||
639 | for (i = 0; i < ttm->num_pages; i++) { | |
8e7e7052 JG |
640 | gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i], |
641 | 0, PAGE_SIZE, | |
642 | PCI_DMA_BIDIRECTIONAL); | |
643 | if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { | |
c52494f6 | 644 | while (i--) {
8e7e7052 | 645 | pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], |
c52494f6 | 646 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
8e7e7052 | 647 | gtt->ttm.dma_address[i] = 0; |
c52494f6 KRW |
648 | } |
649 | ttm_pool_unpopulate(ttm); | |
650 | return -EFAULT; | |
651 | } | |
652 | } | |
653 | return 0; | |
654 | } | |
655 | ||
656 | static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) | |
657 | { | |
658 | struct radeon_device *rdev; | |
8e7e7052 | 659 | struct radeon_ttm_tt *gtt = (void *)ttm; |
c52494f6 KRW |
660 | unsigned i; |
661 | ||
662 | rdev = radeon_get_rdev(ttm->bdev); | |
dea7e0ac JG |
663 | #if __OS_HAS_AGP |
664 | if (rdev->flags & RADEON_IS_AGP) { | |
665 | ttm_agp_tt_unpopulate(ttm); | |
666 | return; | |
667 | } | |
668 | #endif | |
c52494f6 KRW |
669 | |
670 | #ifdef CONFIG_SWIOTLB | |
671 | if (swiotlb_nr_tbl()) { | |
8e7e7052 | 672 | ttm_dma_unpopulate(&gtt->ttm, rdev->dev); |
c52494f6 KRW |
673 | return; |
674 | } | |
675 | #endif | |
676 | ||
677 | for (i = 0; i < ttm->num_pages; i++) { | |
8e7e7052 JG |
678 | if (gtt->ttm.dma_address[i]) { |
679 | pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], | |
c52494f6 KRW |
680 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
681 | } | |
682 | } | |
683 | ||
684 | ttm_pool_unpopulate(ttm); | |
685 | } | |
649bf3ca | 686 | |
771fe6b9 | 687 | static struct ttm_bo_driver radeon_bo_driver = { |
649bf3ca | 688 | .ttm_tt_create = &radeon_ttm_tt_create, |
c52494f6 KRW |
689 | .ttm_tt_populate = &radeon_ttm_tt_populate, |
690 | .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate, | |
771fe6b9 JG |
691 | .invalidate_caches = &radeon_invalidate_caches, |
692 | .init_mem_type = &radeon_init_mem_type, | |
693 | .evict_flags = &radeon_evict_flags, | |
694 | .move = &radeon_bo_move, | |
695 | .verify_access = &radeon_verify_access, | |
696 | .sync_obj_signaled = &radeon_sync_obj_signaled, | |
697 | .sync_obj_wait = &radeon_sync_obj_wait, | |
698 | .sync_obj_flush = &radeon_sync_obj_flush, | |
699 | .sync_obj_unref = &radeon_sync_obj_unref, | |
700 | .sync_obj_ref = &radeon_sync_obj_ref, | |
e024e110 DA |
701 | .move_notify = &radeon_bo_move_notify, |
702 | .fault_reserve_notify = &radeon_bo_fault_reserve_notify, | |
0a2d50e3 JG |
703 | .io_mem_reserve = &radeon_ttm_io_mem_reserve, |
704 | .io_mem_free = &radeon_ttm_io_mem_free, | |
771fe6b9 JG |
705 | }; |
706 | ||
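/* Bring up TTM for the device: global objects, the BO device, VRAM and GTT managers, a pinned 256KB VRAM buffer for the VGA "stolen" memory, and the debugfs entries. */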
707 | int radeon_ttm_init(struct radeon_device *rdev) | |
708 | { | |
709 | int r; | |
710 | ||
711 | r = radeon_ttm_global_init(rdev); | |
712 | if (r) { | |
713 | return r; | |
714 | } | |
715 | /* No other users of the address space, so set it to 0 */ |
716 | r = ttm_bo_device_init(&rdev->mman.bdev, | |
a987fcaa | 717 | rdev->mman.bo_global_ref.ref.object, |
ad49f501 DA |
718 | &radeon_bo_driver, DRM_FILE_PAGE_OFFSET, |
719 | rdev->need_dma32); | |
771fe6b9 JG |
720 | if (r) { |
721 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); | |
722 | return r; | |
723 | } | |
0a0c7596 | 724 | rdev->mman.initialized = true; |
4c788679 | 725 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, |
312ea8da | 726 | rdev->mc.real_vram_size >> PAGE_SHIFT); |
771fe6b9 JG |
727 | if (r) { |
728 | DRM_ERROR("Failed initializing VRAM heap.\n"); | |
729 | return r; | |
730 | } | |
441921d5 | 731 | r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, |
4c788679 JG |
732 | RADEON_GEM_DOMAIN_VRAM, |
733 | &rdev->stollen_vga_memory); | |
771fe6b9 JG |
734 | if (r) { |
735 | return r; | |
736 | } | |
4c788679 JG |
737 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); |
738 | if (r) | |
739 | return r; | |
740 | r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL); | |
741 | radeon_bo_unreserve(rdev->stollen_vga_memory); | |
771fe6b9 | 742 | if (r) { |
4c788679 | 743 | radeon_bo_unref(&rdev->stollen_vga_memory); |
771fe6b9 JG |
744 | return r; |
745 | } | |
746 | DRM_INFO("radeon: %uM of VRAM memory ready\n", | |
3ce0a23d | 747 | (unsigned)rdev->mc.real_vram_size / (1024 * 1024)); |
4c788679 | 748 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, |
312ea8da | 749 | rdev->mc.gtt_size >> PAGE_SHIFT); |
771fe6b9 JG |
750 | if (r) { |
751 | DRM_ERROR("Failed initializing GTT heap.\n"); | |
752 | return r; | |
753 | } | |
754 | DRM_INFO("radeon: %uM of GTT memory ready.\n", | |
3ce0a23d | 755 | (unsigned)(rdev->mc.gtt_size / (1024 * 1024))); |
771fe6b9 JG |
756 | if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { |
757 | rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; | |
758 | } | |
fa8a1238 DA |
759 | |
760 | r = radeon_ttm_debugfs_init(rdev); | |
761 | if (r) { | |
762 | DRM_ERROR("Failed to init debugfs\n"); | |
763 | return r; | |
764 | } | |
771fe6b9 JG |
765 | return 0; |
766 | } | |
767 | ||
768 | void radeon_ttm_fini(struct radeon_device *rdev) | |
769 | { | |
4c788679 JG |
770 | int r; |
771 | ||
0a0c7596 JG |
772 | if (!rdev->mman.initialized) |
773 | return; | |
771fe6b9 | 774 | if (rdev->stollen_vga_memory) { |
4c788679 JG |
775 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); |
776 | if (r == 0) { | |
777 | radeon_bo_unpin(rdev->stollen_vga_memory); | |
778 | radeon_bo_unreserve(rdev->stollen_vga_memory); | |
779 | } | |
780 | radeon_bo_unref(&rdev->stollen_vga_memory); | |
771fe6b9 JG |
781 | } |
782 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM); | |
783 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); | |
784 | ttm_bo_device_release(&rdev->mman.bdev); | |
785 | radeon_gart_fini(rdev); | |
786 | radeon_ttm_global_fini(rdev); | |
0a0c7596 | 787 | rdev->mman.initialized = false; |
771fe6b9 JG |
788 | DRM_INFO("radeon: ttm finalized\n"); |
789 | } | |
790 | ||
53595338 DA |
791 | /* this should only be called at bootup or when userspace |
792 | * isn't running */ | |
793 | void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) | |
794 | { | |
795 | struct ttm_mem_type_manager *man; | |
796 | ||
797 | if (!rdev->mman.initialized) | |
798 | return; | |
799 | ||
800 | man = &rdev->mman.bdev.man[TTM_PL_VRAM]; | |
801 | /* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */ |
802 | man->size = size >> PAGE_SHIFT; | |
803 | } | |
804 | ||
771fe6b9 | 805 | static struct vm_operations_struct radeon_ttm_vm_ops; |
f0f37e2f | 806 | static const struct vm_operations_struct *ttm_vm_ops = NULL; |
771fe6b9 JG |
807 | |
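/* Page-fault wrapper: take vram_mutex around the generic TTM fault handler so CPU faults are serialized with other VRAM accesses. */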
808 | static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |
809 | { | |
810 | struct ttm_buffer_object *bo; | |
5876dd24 | 811 | struct radeon_device *rdev; |
771fe6b9 JG |
812 | int r; |
813 | ||
5876dd24 | 814 | bo = (struct ttm_buffer_object *)vma->vm_private_data; |
771fe6b9 JG |
815 | if (bo == NULL) { |
816 | return VM_FAULT_NOPAGE; | |
817 | } | |
5876dd24 MG |
818 | rdev = radeon_get_rdev(bo->bdev); |
819 | mutex_lock(&rdev->vram_mutex); | |
771fe6b9 | 820 | r = ttm_vm_ops->fault(vma, vmf); |
5876dd24 | 821 | mutex_unlock(&rdev->vram_mutex); |
771fe6b9 JG |
822 | return r; |
823 | } | |
824 | ||
825 | int radeon_mmap(struct file *filp, struct vm_area_struct *vma) | |
826 | { | |
827 | struct drm_file *file_priv; | |
828 | struct radeon_device *rdev; | |
829 | int r; | |
830 | ||
831 | if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) { | |
832 | return drm_mmap(filp, vma); | |
833 | } | |
834 | ||
40b3be3f | 835 | file_priv = filp->private_data; |
771fe6b9 JG |
836 | rdev = file_priv->minor->dev->dev_private; |
837 | if (rdev == NULL) { | |
838 | return -EINVAL; | |
839 | } | |
840 | r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev); | |
841 | if (unlikely(r != 0)) { | |
842 | return r; | |
843 | } | |
844 | if (unlikely(ttm_vm_ops == NULL)) { | |
845 | ttm_vm_ops = vma->vm_ops; | |
846 | radeon_ttm_vm_ops = *ttm_vm_ops; | |
847 | radeon_ttm_vm_ops.fault = &radeon_ttm_fault; | |
848 | } | |
849 | vma->vm_ops = &radeon_ttm_vm_ops; | |
850 | return 0; | |
851 | } | |
852 | ||
853 | ||
fa8a1238 DA |
854 | #define RADEON_DEBUGFS_MEM_TYPES 2 |
855 | ||
fa8a1238 DA |
856 | #if defined(CONFIG_DEBUG_FS) |
857 | static int radeon_mm_dump_table(struct seq_file *m, void *data) | |
858 | { | |
859 | struct drm_info_node *node = (struct drm_info_node *)m->private; | |
860 | struct drm_mm *mm = (struct drm_mm *)node->info_ent->data; | |
861 | struct drm_device *dev = node->minor->dev; | |
862 | struct radeon_device *rdev = dev->dev_private; | |
863 | int ret; | |
864 | struct ttm_bo_global *glob = rdev->mman.bdev.glob; | |
865 | ||
866 | spin_lock(&glob->lru_lock); | |
867 | ret = drm_mm_dump_table(m, mm); | |
868 | spin_unlock(&glob->lru_lock); | |
869 | return ret; | |
870 | } | |
871 | #endif | |
872 | ||
873 | static int radeon_ttm_debugfs_init(struct radeon_device *rdev) | |
874 | { | |
f4e45d02 | 875 | #if defined(CONFIG_DEBUG_FS) |
c52494f6 KRW |
876 | static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2]; |
877 | static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32]; | |
fa8a1238 DA |
878 | unsigned i; |
879 | ||
fa8a1238 DA |
880 | for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) { |
881 | if (i == 0) | |
882 | sprintf(radeon_mem_types_names[i], "radeon_vram_mm"); | |
883 | else | |
884 | sprintf(radeon_mem_types_names[i], "radeon_gtt_mm"); | |
885 | radeon_mem_types_list[i].name = radeon_mem_types_names[i]; | |
886 | radeon_mem_types_list[i].show = &radeon_mm_dump_table; | |
887 | radeon_mem_types_list[i].driver_features = 0; | |
888 | if (i == 0) | |
16f9fdcb | 889 | radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv; |
fa8a1238 | 890 | else |
16f9fdcb | 891 | radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv; |
fa8a1238 DA |
892 | |
893 | } | |
8d7cddcd PN |
894 | /* Add ttm page pool to debugfs */ |
895 | sprintf(radeon_mem_types_names[i], "ttm_page_pool"); | |
896 | radeon_mem_types_list[i].name = radeon_mem_types_names[i]; | |
897 | radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs; | |
898 | radeon_mem_types_list[i].driver_features = 0; | |
c52494f6 KRW |
899 | radeon_mem_types_list[i++].data = NULL; |
900 | #ifdef CONFIG_SWIOTLB | |
901 | if (swiotlb_nr_tbl()) { | |
902 | sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool"); | |
903 | radeon_mem_types_list[i].name = radeon_mem_types_names[i]; | |
904 | radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs; | |
905 | radeon_mem_types_list[i].driver_features = 0; | |
906 | radeon_mem_types_list[i++].data = NULL; | |
907 | } | |
908 | #endif | |
909 | return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i); | |
fa8a1238 DA |
910 | |
911 | #endif | |
912 | return 0; | |
913 | } |