/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

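/*
 * Walk back from the ttm_bo_device embedded in radeon_mman to the
 * radeon_device that contains it; the two container_of() steps undo
 * the two levels of struct embedding.
 */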
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}


/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

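/*
 * Take a reference on the single, driver-independent ttm_mem_global
 * accounting object; TTM creates it on the first reference and tears
 * it down when the last driver drops its reference.
 */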
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct ttm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed referencing a global TTM memory object.\n");
		return r;
	}
	rdev->mman.mem_global_referenced = true;
	return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		ttm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}

struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);

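/*
 * Pick the backend used to bind pages into the GPU's address space:
 * the generic TTM AGP backend on AGP cards, otherwise the driver's
 * own GART backend defined at the bottom of this file.
 */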
static struct ttm_backend*
radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
	} else
#endif
	{
		return radeon_ttm_backend_create(rdev);
	}
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

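/*
 * Describe the three memory domains to TTM: cached system pages,
 * GTT (the AGP aperture or the internal GART), and the VRAM aperture.
 */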
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->gpu_offset = 0;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			man->io_offset = rdev->mc.agp_base;
			man->io_size = rdev->mc.gtt_size;
			man->io_addr = NULL;
			man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
				     TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else
#endif
		{
			man->io_offset = 0;
			man->io_size = 0;
			man->io_addr = NULL;
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
		}
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		man->io_addr = NULL;
		man->io_offset = rdev->mc.aper_base;
		man->io_size = rdev->mc.aper_size;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

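/*
 * Eviction placement: whatever the current domain, evicted buffers
 * are sent to cached system memory (the switch currently has only the
 * default case), keeping the non-caching placement bits intact.
 */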
static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
{
	uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;

	switch (bo->mem.mem_type) {
	default:
		return (cur_placement & ~TTM_PL_MASK_CACHING) |
			TTM_PL_FLAG_SYSTEM |
			TTM_PL_FLAG_CACHED;
	}
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

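/*
 * "Move" between placements that need no copy: adopt the new
 * placement wholesale and steal its mm_node.
 */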
static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

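/*
 * Copy a buffer with the CP: translate both placements into GPU
 * addresses, kick off radeon_copy() and hand the fence to TTM so the
 * old backing store is only released once the blit has finished.
 */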
static int radeon_move_blit(struct ttm_buffer_object *bo,
			    bool evict, int no_wait,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (!rdev->cp.ready) {
		DRM_ERROR("Trying to move memory with CP turned off.\n");
		return -EINVAL;
	}
	old_start = old_mem->mm_node->start << PAGE_SHIFT;
	new_start = new_mem->mm_node->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	/* Create the fence only after validation so error paths above
	 * cannot leak it. */
	r = radeon_fence_create(rdev, &fence);
	if (unlikely(r)) {
		return r;
	}
	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
	/* FIXME: handle copy error */
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
				      evict, no_wait, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

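/*
 * VRAM -> system move in two steps: blit into a temporary GTT
 * placement first (so the copy runs on the GPU), then let TTM move
 * the now GTT-backed buffer into system memory.
 */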
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible, bool no_wait,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	uint32_t proposed_placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
			     interruptible, no_wait);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		spin_lock(&rdev->mman.bdev.lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&rdev->mman.bdev.lru_lock);
	}
	return r;
}

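/*
 * System -> VRAM move, mirroring the path above: first move the pages
 * into a bound GTT placement, then blit from GTT into VRAM.
 */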
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible, bool no_wait,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	uint32_t proposed_flags;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
			     interruptible, no_wait);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	if (tmp_mem.mm_node) {
		spin_lock(&rdev->mman.bdev.lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&rdev->mman.bdev.lru_lock);
	}
	return r;
}

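/*
 * Top-level move hook: pick a null move, a GPU blit or a bounce
 * through GTT depending on source and destination placement, and fall
 * back to memcpy when the CP cannot be used.
 */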
static int radeon_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible, bool no_wait,
			  struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->cp.ready) {
		/* use memcpy */
		DRM_ERROR("CP is not ready, falling back to memcpy.\n");
		return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		return radeon_move_vram_ram(bo, evict, interruptible,
					    no_wait, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		return radeon_move_ram_vram(bo, evict, interruptible,
					    no_wait, new_mem);
	}
	return radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
}

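/*
 * Domain priorities handed to TTM: radeon_mem_prios is the normal
 * placement order, radeon_busy_prios the fallback order used when the
 * first pass cannot find room.
 */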
const uint32_t radeon_mem_prios[] = {
	TTM_PL_VRAM,
	TTM_PL_TT,
	TTM_PL_SYSTEM,
};

const uint32_t radeon_busy_prios[] = {
	TTM_PL_TT,
	TTM_PL_VRAM,
	TTM_PL_SYSTEM,
};

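/*
 * TTM's opaque sync objects are radeon fences; these thin wrappers
 * just cast and forward to the fence API.
 */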
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
				bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
	radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
	return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.mem_type_prio = radeon_mem_prios,
	.mem_busy_prio = radeon_busy_prios,
	.num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
	.num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.sync_obj_signaled = &radeon_sync_obj_signaled,
	.sync_obj_wait = &radeon_sync_obj_wait,
	.sync_obj_flush = &radeon_sync_obj_flush,
	.sync_obj_unref = &radeon_sync_obj_unref,
	.sync_obj_ref = &radeon_sync_obj_ref,
};

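/*
 * Bring up TTM: take the global memory reference, initialize the BO
 * device, create the VRAM and GTT ranges, and reserve a pinned 256KB
 * VRAM buffer (rdev->stollen_vga_memory) covering the VGA memory.
 */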
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No other user of the address space, so start it at 0. */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.mem_global_ref.object,
			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
			   ((rdev->mc.aper_size) >> PAGE_SHIFT));
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = radeon_object_create(rdev, NULL, 256 * 1024, true,
				 RADEON_GEM_DOMAIN_VRAM, false,
				 &rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	if (r) {
		radeon_object_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready.\n",
		 rdev->mc.vram_size / (1024 * 1024));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
			   ((rdev->mc.gtt_size) >> PAGE_SHIFT));
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 rdev->mc.gtt_size / (1024 * 1024));
	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	if (rdev->stollen_vga_memory) {
		radeon_object_unpin(rdev->stollen_vga_memory);
		radeon_object_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	DRM_INFO("radeon: ttm finalized\n");
}

static struct vm_operations_struct radeon_ttm_vm_ops;
static struct vm_operations_struct *ttm_vm_ops = NULL;

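/*
 * Pass-through fault handler: vma->vm_private_data carries the TTM
 * buffer object set up by ttm_bo_mmap(); the real work is delegated
 * to TTM's own fault handler captured in radeon_mmap() below.
 */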
static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	r = ttm_vm_ops->fault(vma, vmf);
	return r;
}

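/*
 * mmap entry point: offsets below DRM_FILE_PAGE_OFFSET belong to the
 * legacy DRM maps, everything above is a TTM object. On the first TTM
 * mapping, copy TTM's vm_ops so our fault wrapper can delegate to it.
 */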
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = (struct drm_file *)filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}


/*
 * TTM backend functions.
 */
struct radeon_ttm_backend {
	struct ttm_backend backend;
	struct radeon_device *rdev;
	unsigned long num_pages;
	struct page **pages;
	struct page *dummy_read_page;
	bool populated;
	bool bound;
	unsigned offset;
};

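/*
 * populate()/clear() only cache the page array handed over by TTM;
 * nothing touches the GART until bind() runs.
 */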
static int radeon_ttm_backend_populate(struct ttm_backend *backend,
				       unsigned long num_pages,
				       struct page **pages,
				       struct page *dummy_read_page)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = pages;
	gtt->num_pages = num_pages;
	gtt->dummy_read_page = dummy_read_page;
	gtt->populated = true;
	return 0;
}

static void radeon_ttm_backend_clear(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
}

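/*
 * bind() maps the cached pages into the GART at the offset chosen by
 * TTM (mm_node->start, in pages).
 */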
static int radeon_ttm_backend_bind(struct ttm_backend *backend,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_backend *gtt;
	int r;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
	if (!gtt->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     gtt->num_pages, bo_mem, backend);
	}
	r = radeon_gart_bind(gtt->rdev, gtt->offset,
			     gtt->num_pages, gtt->pages);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  gtt->num_pages, gtt->offset);
		return r;
	}
	gtt->bound = true;
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
	gtt->bound = false;
	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	if (gtt->bound) {
		radeon_ttm_backend_unbind(backend);
	}
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.populate = &radeon_ttm_backend_populate,
	.clear = &radeon_ttm_backend_clear,
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

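/*
 * Allocate and wire up the GART backend. kzalloc() already zeroes the
 * struct; the explicit assignments below just spell out the initial
 * state.
 */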
struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
{
	struct radeon_ttm_backend *gtt;

	gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->backend.bdev = &rdev->mman.bdev;
	gtt->backend.flags = 0;
	gtt->backend.func = &radeon_backend_func;
	gtt->rdev = rdev;
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
	return &gtt->backend;
}