/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_xgmi.h"

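/**
 * amdgpu_gem_object_free - free the GEM object
 *
 * @gobj: GEM object to free
 *
 * Unregisters the MMU notifier and drops the reference on the
 * underlying buffer object.
 */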
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

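/**
 * amdgpu_gem_object_create - create an amdgpu BO wrapped in a GEM object
 *
 * @adev: amdgpu_device pointer
 * @size: size of the BO in bytes
 * @alignment: requested alignment in bytes
 * @initial_domain: preferred memory domain for the allocation
 * @flags: AMDGPU_GEM_CREATE_* creation flags
 * @type: TTM BO type
 * @resv: reservation object to share, or NULL
 * @obj: resulting GEM object
 *
 * On allocation failure the CPU access flag is dropped and a VRAM
 * placement is retried with GTT as an additional domain before
 * giving up.
 */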
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct dma_resv *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
retry:
	bp.flags = flags;
	bp.domain = initial_domain;
	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
				goto retry;
			}

			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &bo->tbo.base;

	return 0;
}

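/**
 * amdgpu_gem_force_release - forcibly release all GEM handles
 *
 * @adev: amdgpu_device pointer
 *
 * Drops the GEM objects of all open files; any client still holding
 * handles at this point triggers a warning.
 */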
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

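/**
 * amdgpu_gem_object_close - clean up the VM mapping when a handle is closed
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file private of the closing client
 *
 * Drops the bo_va reference and, once it reaches zero, removes the
 * mapping from the VM and clears the freed page table entries.
 */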
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct dma_fence *fence = NULL;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	long r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.num_shared = 2;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%ld)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (!bo_va || --bo_va->ref_count)
		goto out_unlock;

	amdgpu_vm_bo_rmv(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	fence = dma_resv_get_excl(bo->tbo.base.resv);
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page "
			"tables on GEM object close (%ld)\n", r);
	ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
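/**
 * amdgpu_gem_create_ioctl - create a GEM object from user space
 *
 * @dev: DRM device
 * @data: ioctl argument, a union drm_amdgpu_gem_create
 * @filp: DRM file private
 *
 * Validates the requested flags and domains, creates the BO and
 * returns a handle to it in @data.
 */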
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct dma_resv *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* if gds bo is created from user space, it must be
			 * passed to bo list
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			return r;

		resv = vm->root.base.bo->tbo.base.resv;
	}

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     flags, ttm_bo_type_device, resv, &gobj);
	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
		}
		amdgpu_bo_unreserve(vm->root.base.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

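/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * @dev: DRM device
 * @data: ioctl argument, a struct drm_amdgpu_gem_userptr
 * @filp: DRM file private
 *
 * Wraps a page-aligned user address range into a GTT BO, optionally
 * registering an MMU notifier and pre-validating the pages.
 */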
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto user_pages_done;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto user_pages_done;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r)
		goto user_pages_done;

	args->handle = handle;

user_pages_done:
	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
}

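/**
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset of a BO
 *
 * @filp: DRM file private
 * @dev: DRM device
 * @handle: GEM handle
 * @offset_p: resulting mmap offset
 *
 * Userptr BOs and BOs created without CPU access are rejected.
 */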
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

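/**
 * amdgpu_gem_mmap_ioctl - ioctl wrapper around amdgpu_mode_dumb_mmap()
 *
 * @dev: DRM device
 * @data: ioctl argument, a union drm_amdgpu_gem_mmap
 * @filp: DRM file private
 */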
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

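/**
 * amdgpu_gem_wait_idle_ioctl - wait for a GEM object to become idle
 *
 * @dev: DRM device
 * @data: ioctl argument, a union drm_amdgpu_gem_wait_idle
 * @filp: DRM file private
 *
 * Waits on all fences of the BO's reservation object, up to the
 * timeout requested by user space.
 */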
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
					timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}

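/**
 * amdgpu_gem_metadata_ioctl - get or set tiling flags and metadata
 *
 * @dev: DRM device
 * @data: ioctl argument, a struct drm_amdgpu_gem_metadata
 * @filp: DRM file private
 */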
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the GEM UAPI flags mapped into hardware for the ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	if (adev->gmc.gmc_funcs->map_mtype)
		pte_flag |= amdgpu_gmc_map_mtype(adev,
						 flags & AMDGPU_VM_MTYPE_MASK);

	return pte_flag;
}

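/**
 * amdgpu_gem_va_ioctl - map/unmap a BO in the client's VM address space
 *
 * @dev: DRM device
 * @data: ioctl argument, a struct drm_amdgpu_gem_va
 * @filp: DRM file private
 *
 * Handles the MAP, UNMAP, CLEAR and REPLACE operations and flushes
 * the update unless user space asked for a delayed update.
 */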
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in reserved area 0x%LX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
	    args->va_address < AMDGPU_GMC_HOLE_END) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
			args->va_address, AMDGPU_GMC_HOLE_START,
			AMDGPU_GMC_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_GMC_HOLE_MASK;

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			tv.num_shared = 1;
		else
			tv.num_shared = 0;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

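/**
 * amdgpu_gem_op_ioctl - query creation info or change the placement of a BO
 *
 * @dev: DRM device
 * @data: ioctl argument, a struct drm_amdgpu_gem_op
 * @filp: DRM file private
 */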
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_vm_bo_base *base;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->tbo.base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		for (base = robj->vm_bo; base; base = base->next)
			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
				amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
				r = -EINVAL;
				amdgpu_bo_unreserve(robj);
				goto out;
			}

		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

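/**
 * amdgpu_mode_dumb_create - create a dumb buffer for display
 *
 * @file_priv: DRM file private
 * @dev: DRM device
 * @args: pitch, size and handle of the resulting buffer
 *
 * Allocates a CPU-accessible, USWC BO in a domain suitable for
 * scanout and returns a handle to it.
 */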
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	u32 domain;
	int r;

	/*
	 * The buffer returned from this function should be cleared, but
	 * it can only be done if the ring is enabled or we'll fail to
	 * create the buffer.
	 */
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_pin_domain(adev,
				amdgpu_display_supported_domains(adev, flags));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)	\
	if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
		seq_printf((m), " " #flag);		\
	}

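/*
 * Dump one GEM BO for the amdgpu_gem_info debugfs file; called for
 * each object in a client's handle IDR.
 */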
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	unsigned domain;
	const char *placement;
	unsigned pin_count;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	pin_count = READ_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
	attachment = READ_ONCE(bo->tbo.base.import_attach);

	if (attachment)
		seq_printf(m, " imported from %p", dma_buf);
	else if (dma_buf)
		seq_printf(m, " exported as %p", dma_buf);

	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_printf(m, "\n");

	return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}