/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

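/**
 * radeon_gem_object_free - free the radeon_bo backing a GEM object
 *
 * @gobj: GEM object being destroyed
 *
 * Tears down any PRIME import attachment and MMU notifier registration,
 * then drops the last reference on the backing radeon_bo.
 */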
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

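/**
 * radeon_gem_object_create - allocate a radeon_bo and wrap it in a GEM object
 *
 * @rdev: radeon device
 * @size: requested size in bytes
 * @alignment: requested alignment in bytes (raised to at least PAGE_SIZE)
 * @initial_domain: preferred placement (VRAM, GTT or CPU)
 * @flags: radeon BO creation flags
 * @kernel: true for kernel-internal allocations
 * @obj: where to store the resulting GEM object pointer
 *
 * A failed VRAM allocation is retried with GTT added to the allowed
 * domains.  Returns 0 on success or a negative error code.
 */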
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

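/**
 * radeon_gem_set_domain - validate a BO into the requested domain
 *
 * @gobj: GEM object to move
 * @rdomain: requested read domains
 * @wdomain: requested write domain
 *
 * The write domain takes priority over the read domains.  Only the CPU
 * domain is handled at the moment, by waiting up to 30 seconds for the
 * buffer to become idle.
 */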
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to be idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			printk(KERN_ERR "Failed to wait for object: %li\n", r);
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which is reached from both the
 * GEM create and the GEM open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

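/**
 * radeon_gem_object_close - drop a file's per-VM mapping reference on a BO
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file the handle belonged to
 *
 * Drops the bo_va reference taken in radeon_gem_object_open() and removes
 * the mapping from the file's VM once the last reference goes away.
 */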
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

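/**
 * radeon_gem_handle_lockup - translate a GPU lockup into a userspace retry
 *
 * @rdev: radeon device
 * @r: error code returned by the failing operation
 *
 * On -EDEADLK the GPU is reset; if the reset succeeds the error is turned
 * into -EAGAIN so userspace can retry the ioctl.  All other codes are
 * passed through unchanged.
 */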
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
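/**
 * radeon_gem_info_ioctl - report memory sizes to userspace
 *
 * Fills in the total VRAM size, the VRAM managed by TTM minus the amount
 * currently pinned, and the GART size minus the pinned GART amount.
 */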
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

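/**
 * radeon_gem_create_ioctl - allocate a GEM buffer object for userspace
 *
 * Rounds the requested size up to a page multiple, creates the BO in the
 * requested initial domain and returns a handle for it.  Errors go through
 * radeon_gem_handle_lockup() so a GPU lockup becomes -EAGAIN after a reset.
 */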
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

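/**
 * radeon_gem_userptr_ioctl - wrap anonymous user memory in a GEM object
 *
 * The address and size must be page aligned and the flags limited to
 * READONLY, ANONONLY, VALIDATE and REGISTER.  Writable mappings must be
 * anonymous-only and registered with an MMU notifier; READONLY mappings
 * are rejected on pre-R600 hardware.  With VALIDATE set the BO is
 * validated into the GTT domain up front.
 */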
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

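/**
 * radeon_gem_set_domain_ioctl - move a BO to the requested memory domain
 *
 * Only the CPU-domain case is currently acted upon, by waiting for the
 * buffer to become idle; other domains are accepted but left alone.
 */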
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

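/**
 * radeon_mode_dumb_mmap - look up the fake mmap offset for a BO
 *
 * Userptr objects cannot be mapped this way and are rejected with -EPERM.
 */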
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

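/**
 * radeon_gem_busy_ioctl - non-blocking check whether a BO is still in use
 *
 * Returns -EBUSY if any fence on the reservation object is still pending,
 * 0 otherwise, and reports the BO's current memory domain to userspace.
 */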
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

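/**
 * radeon_gem_wait_idle_ioctl - wait for a BO to become idle
 *
 * Waits up to 30 seconds on the BO's reservation object and returns
 * -EBUSY on timeout.  If the BO currently lives in VRAM, the HDP cache
 * is flushed through MMIO afterwards.
 */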
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	drm_free_large(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

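/**
 * radeon_gem_va_ioctl - map or unmap a BO in a per-file virtual address space
 *
 * Validates the requested operation, offset and flags, looks up the bo_va
 * in the file's VM and then maps or unmaps the buffer at the requested GPU
 * virtual address, updating the page tables on success.
 */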
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet, so to be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value.  Thus,
	 * moving forward we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

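/**
 * radeon_gem_op_ioctl - get or set the initial placement domain of a BO
 *
 * Userptr objects are rejected with -EPERM.  SET_INITIAL_DOMAIN masks the
 * value down to the VRAM, GTT and CPU domains.
 */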
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

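/**
 * radeon_mode_dumb_create - create a dumb buffer suitable for scanout
 *
 * Computes an aligned pitch and a page-aligned size from the requested
 * width, height and bpp, allocates the BO in VRAM and returns a handle.
 */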
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}