/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_xgmi.h"
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}
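/**
 * amdgpu_gem_object_create - create an amdgpu BO wrapped in a GEM object
 *
 * @adev: amdgpu device the buffer is allocated on
 * @size: requested buffer size in bytes
 * @alignment: requested byte alignment
 * @initial_domain: preferred initial placement (VRAM, GTT, CPU, ...)
 * @flags: AMDGPU_GEM_CREATE_* creation flags
 * @type: TTM buffer object type
 * @resv: reservation object to share, or NULL for a private one
 * @obj: resulting GEM object, NULL on failure
 *
 * On allocation failure the placement is progressively relaxed: first the
 * CPU-access requirement is dropped, then a VRAM request falls back to GTT.
 */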
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct dma_resv *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
retry:
	bp.flags = flags;
	bp.domain = initial_domain;
	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
				goto retry;
			}

			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &bo->tbo.base;

	return 0;
}
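/**
 * amdgpu_gem_force_release - drop all GEM handles of all clients
 *
 * @adev: amdgpu device pointer
 *
 * Used on driver teardown; any user space clients still holding handles at
 * this point are leaking them, so warn and release everything forcibly.
 */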
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}
/*
 * Called from drm_gem_handle_create(), which appears in both the GEM create
 * and open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	/* userptr BOs may only be opened by the process that created them */
	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va)
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	else
		++bo_va->ref_count;
	amdgpu_bo_unreserve(abo);
	return 0;
}
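/**
 * amdgpu_gem_object_close - clean up a BO's VM state when a handle is closed
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file private of the closing client
 *
 * Drops the bo_va reference for this client and, once the last reference is
 * gone, removes the VA mappings and fences the page table updates.
 */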
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct dma_fence *fence = NULL;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	long r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.num_shared = 2;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%ld)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (!bo_va || --bo_va->ref_count)
		goto out_unlock;

	amdgpu_vm_bo_rmv(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	fence = dma_resv_get_excl(bo->tbo.base.resv);
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page "
			"tables on GEM object close (%ld)\n", r);
	ttm_eu_backoff_reservation(&ticket, &list);
}
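/**
 * amdgpu_gem_create_ioctl - create a GEM object from user space
 *
 * @dev: DRM device
 * @data: ioctl arguments (union drm_amdgpu_gem_create)
 * @filp: DRM file private
 *
 * Validates the requested flags and domains, creates the buffer object and
 * returns a handle to it in args->out.handle.
 */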
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct dma_resv *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
		      AMDGPU_GEM_CREATE_ENCRYPTED))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
		return -EINVAL;
	}

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* if gds bo is created from user space, it must be
			 * passed to bo list
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			return r;

		resv = vm->root.base.bo->tbo.base.resv;
	}

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     flags, ttm_bo_type_device, resv, &gobj);
	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
		}
		amdgpu_bo_unreserve(vm->root.base.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}
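/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * @dev: DRM device
 * @data: ioctl arguments (struct drm_amdgpu_gem_userptr)
 * @filp: DRM file private
 *
 * Wraps an anonymous user space address range into a GTT-only buffer
 * object.  Writable mappings additionally require an MMU notifier so the
 * pages can be invalidated when user space unmaps them.
 */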
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto user_pages_done;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto user_pages_done;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r)
		goto user_pages_done;

	args->handle = handle;

user_pages_done:
	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
}
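/**
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset for a BO
 *
 * @filp: DRM file private
 * @dev: DRM device
 * @handle: GEM handle of the buffer
 * @offset_p: returned mmap offset
 *
 * Userptr BOs and BOs created without CPU access cannot be mapped.
 */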
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}
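/**
 * amdgpu_gem_mmap_ioctl - return the mmap offset for a GEM handle
 *
 * @dev: DRM device
 * @data: ioctl arguments (union drm_amdgpu_gem_mmap)
 * @filp: DRM file private
 *
 * Thin wrapper around amdgpu_mode_dumb_mmap().
 */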
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}
/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}
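/**
 * amdgpu_gem_wait_idle_ioctl - wait for a buffer's fences to signal
 *
 * @dev: DRM device
 * @data: ioctl arguments (union drm_amdgpu_gem_wait_idle)
 * @filp: DRM file private
 *
 * Waits on the buffer's reservation object up to the absolute timeout given
 * in args->in.timeout and reports back whether it signaled in time.
 */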
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);
	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
					timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}
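/**
 * amdgpu_gem_metadata_ioctl - get or set buffer metadata and tiling flags
 *
 * @dev: DRM device
 * @data: ioctl arguments (struct drm_amdgpu_gem_metadata)
 * @filp: DRM file private
 */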
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}
/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the GEM UAPI flags mapped into hardware for the ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	if (adev->gmc.gmc_funcs->map_mtype)
		pte_flag |= amdgpu_gmc_map_mtype(adev,
						 flags & AMDGPU_VM_MTYPE_MASK);

	return pte_flag;
}
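/**
 * amdgpu_gem_va_ioctl - map or unmap a buffer in a VM address range
 *
 * @dev: DRM device
 * @data: ioctl arguments (struct drm_amdgpu_gem_va)
 * @filp: DRM file private
 *
 * Validates the requested virtual address range and flags, then performs
 * the MAP, UNMAP, CLEAR or REPLACE operation on the client's VM.
 */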
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in reserved area 0x%LX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
	    args->va_address < AMDGPU_GMC_HOLE_END) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
			args->va_address, AMDGPU_GMC_HOLE_START,
			AMDGPU_GMC_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_GMC_HOLE_MASK;

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			tv.num_shared = 1;
		else
			tv.num_shared = 0;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;
	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}
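/**
 * amdgpu_gem_op_ioctl - query or change buffer placement information
 *
 * @dev: DRM device
 * @data: ioctl arguments (struct drm_amdgpu_gem_op)
 * @filp: DRM file private
 */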
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_vm_bo_base *base;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->tbo.base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count &&
		    (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		for (base = robj->vm_bo; base; base = base->next)
			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
				amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
				r = -EINVAL;
				amdgpu_bo_unreserve(robj);
				goto out;
			}

		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							 AMDGPU_GEM_DOMAIN_GTT |
							 AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}
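/**
 * amdgpu_mode_dumb_create - create a dumb buffer for display
 *
 * @file_priv: DRM file private
 * @dev: DRM device
 * @args: dumb buffer parameters
 *
 * Allocates a CPU-accessible, USWC scanout buffer in a displayable domain
 * and returns a handle to it.
 */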
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	u32 domain;
	int r;

	/*
	 * The buffer returned from this function should be cleared, but
	 * it can only be done if the ring is enabled or we'll fail to
	 * create the buffer.
	 */
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_pin_domain(adev,
				amdgpu_display_supported_domains(adev, flags));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;
}
#if defined(CONFIG_DEBUG_FS)

#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)		\
	if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {		\
		seq_printf((m), " " #flag);			\
	}
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	unsigned domain;
	const char *placement;
	unsigned pin_count;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	pin_count = READ_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
	attachment = READ_ONCE(bo->tbo.base.import_attach);

	if (attachment)
		seq_printf(m, " imported from %p%s", dma_buf,
			   attachment->peer2peer ? " P2P" : "");
	else if (dma_buf)
		seq_printf(m, " exported as %p", dma_buf);

	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_printf(m, "\n");

	return 0;
}
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}
static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif
int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}