/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_bo_unref(&robj);
	}
}

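/**
 * amdgpu_gem_object_create - allocate a backing amdgpu_bo for a GEM object
 *
 * @adev: amdgpu device
 * @size: requested size in bytes, rounded up to the page size by the callers
 * @alignment: requested alignment, forced to at least PAGE_SIZE
 * @initial_domain: initial placement (VRAM, GTT, CPU, GDS, GWS or OA)
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @kernel: whether this is a kernel-internal allocation
 * @obj: resulting GEM object
 *
 * VRAM allocations that fail with anything but -ERESTARTSYS are retried
 * with GTT added to the domain before giving up.
 */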
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
		/* Maximum bo size is the unpinned gtt size since we use the gtt to
		 * handle vram to system pool migrations.
		 */
		max_size = adev->mc.gtt_size - adev->gart_pin_size;
		if (size > max_size) {
			DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
				  size >> 20, max_size >> 20);
			return -ENOMEM;
		}
	}
retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&adev->gem.mutex);
	list_add_tail(&robj->list, &adev->gem.objects);
	mutex_unlock(&adev->gem.mutex);

	return 0;
}

int amdgpu_gem_init(struct amdgpu_device *adev)
{
	INIT_LIST_HEAD(&adev->gem.objects);
	return 0;
}

void amdgpu_gem_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_force_delete(adev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the GEM create
 * and the GEM open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = rbo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(rbo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(rbo);
	return 0;
}

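/**
 * amdgpu_gem_object_close - drop the per-VM mapping on GEM handle close
 *
 * Drops the bo_va reference taken in amdgpu_gem_object_open() and removes
 * the VA mapping once the last reference is gone.
 */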
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = rbo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(rbo, true);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			amdgpu_vm_bo_rmv(adev, bo_va);
		}
	}
	amdgpu_bo_unreserve(rbo);
}

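/*
 * Translate a lockup (-EDEADLK) into a GPU reset and, when the reset
 * succeeds, tell userspace to retry the ioctl (-EAGAIN).
 */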
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

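/**
 * amdgpu_gem_create_ioctl - DRM_IOCTL_AMDGPU_GEM_CREATE handler
 *
 * A rough userspace sketch (args layout taken from the decoding below;
 * the surrounding libdrm plumbing and the use() helper are assumed):
 *
 *	union drm_amdgpu_gem_create args = {};
 *	args.in.bo_size = 4096;
 *	args.in.alignment = 4096;
 *	args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
 *	drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args);
 *	use(args.out.handle);
 */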
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_create *args = data;
	uint64_t size = args->in.bo_size;
	struct drm_gem_object *gobj;
	uint32_t handle;
	bool kernel = false;
	int r;

	down_read(&adev->exclusive_lock);
	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		kernel = true;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else {
			r = -EINVAL;
			goto error_unlock;
		}
	}
	size = roundup(size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     args->in.domain_flags,
				     kernel, &gobj);
	if (r)
		goto error_unlock;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto error_unlock;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	up_read(&adev->exclusive_lock);
	return 0;

error_unlock:
	up_read(&adev->exclusive_lock);
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

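/*
 * Wrap an anonymous userspace mapping in a GEM object. The address and
 * size must be page aligned, and AMDGPU_GEM_USERPTR_ANONONLY plus
 * _REGISTER are both required so an MMU notifier can track the pages;
 * _VALIDATE additionally faults the pages into GTT up front.
 */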
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
		/* if we want to write to it we must require anonymous
		   memory and install a MMU notifier */
		return -EACCES;
	}

	down_read(&adev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_CPU, 0,
				     0, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_amdgpu_bo(gobj);
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = amdgpu_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&adev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	up_read(&adev->exclusive_lock);
	r = amdgpu_gem_handle_lockup(adev, r);

	return r;
}

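/*
 * Look up a handle and return the fake mmap offset of its BO. Userptr
 * objects and BOs created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS cannot
 * be mapped.
 */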
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	/* remaining time is the absolute deadline minus the current time,
	 * not the other way around */
	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned->signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

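/*
 * Wait for all fences on a BO's reservation object to signal, or just
 * poll them when the timeout has already expired. args->out.status is
 * non-zero while the BO is still busy.
 */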
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (timeout == 0)
		ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	else
		ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_unreference_unlocked(gobj);
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

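/*
 * Get or set the tiling flags and the opaque metadata blob attached to
 * a BO, selected by args->op.
 */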
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct amdgpu_bo_list_entry *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
	if (r)
		goto error_unlock;

	r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	drm_free_large(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

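/**
 * amdgpu_gem_va_ioctl - map or unmap a BO in the process GPU VA space
 *
 * Validates the requested VA range and AMDGPU_VM_PAGE_* flags, then maps
 * or unmaps the BO and flushes the change with amdgpu_gem_va_update_vm().
 */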
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	union drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *rbo;
	struct amdgpu_bo_va *bo_va;
	uint32_t invalid_flags, va_flags = 0;
	int r = 0;

	if (!adev->vm_manager.enabled) {
		memset(args, 0, sizeof(*args));
		args->out.result = AMDGPU_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	if (args->in.va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->in.va_address,
			AMDGPU_VA_RESERVED_SIZE);
		memset(args, 0, sizeof(*args));
		args->out.result = AMDGPU_VA_RESULT_ERROR;
		return -EINVAL;
	}

	invalid_flags = ~(AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
			  AMDGPU_VM_PAGE_EXECUTABLE);
	if ((args->in.flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->in.flags, invalid_flags);
		memset(args, 0, sizeof(*args));
		args->out.result = AMDGPU_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->in.operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->in.operation);
		memset(args, 0, sizeof(*args));
		args->out.result = AMDGPU_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->in.handle);
	if (gobj == NULL) {
		memset(args, 0, sizeof(*args));
		args->out.result = AMDGPU_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_amdgpu_bo(gobj);
	r = amdgpu_bo_reserve(rbo, false);
	if (r) {
		if (r != -ERESTARTSYS) {
			memset(args, 0, sizeof(*args));
			args->out.result = AMDGPU_VA_RESULT_ERROR;
		}
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}

	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		/* balance the reserve above; the map/unmap paths below
		 * drop the reservation themselves */
		amdgpu_bo_unreserve(rbo);
		memset(args, 0, sizeof(*args));
		args->out.result = AMDGPU_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->in.operation) {
	case AMDGPU_VA_OP_MAP:
		if (args->in.flags & AMDGPU_VM_PAGE_READABLE)
			va_flags |= AMDGPU_PTE_READABLE;
		if (args->in.flags & AMDGPU_VM_PAGE_WRITEABLE)
			va_flags |= AMDGPU_PTE_WRITEABLE;
		if (args->in.flags & AMDGPU_VM_PAGE_EXECUTABLE)
			va_flags |= AMDGPU_PTE_EXECUTABLE;
		r = amdgpu_vm_bo_map(adev, bo_va, args->in.va_address,
				     args->in.offset_in_bo, args->in.map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->in.va_address);
		break;
	default:
		break;
	}

	if (!r) {
		amdgpu_gem_va_update_vm(adev, bo_va);
		memset(args, 0, sizeof(*args));
		args->out.result = AMDGPU_VA_RESULT_OK;
	} else {
		memset(args, 0, sizeof(*args));
		args->out.result = AMDGPU_VA_RESULT_ERROR;
	}

	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

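/*
 * Per-BO operations: query the creation parameters of a BO or restrict
 * the domains it may be placed in.
 */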
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = (void __user *)(long)args->value;

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->initial_domain;
		info.domain_flags = robj->flags;
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
			r = -EPERM;
			break;
		}
		robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
						      AMDGPU_GEM_DOMAIN_GTT |
						      AMDGPU_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

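/*
 * Create a dumb (scanout-capable) buffer in VRAM. The pitch is aligned
 * to the hardware requirements and scaled from bits to bytes per pixel,
 * and the total size is page aligned.
 */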
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) *
		      ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     0, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

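/*
 * Debugfs: dump every GEM object on the device list with its size,
 * placement and owning pid.
 */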
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *rbo;
	unsigned i = 0;

	mutex_lock(&adev->gem.mutex);
	list_for_each_entry(rbo, &adev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case AMDGPU_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case AMDGPU_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case AMDGPU_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&adev->gem.mutex);

	return 0;
}

static struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}