/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	if (adev->rmmio == NULL)
		goto done_free;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_amdkfd_device_fini(adev);

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
}
/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

#ifdef CONFIG_DRM_AMDGPU_SI
	if (!amdgpu_si_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_VERDE:
		case CHIP_OLAND:
		case CHIP_HAINAN:
			dev_info(dev->dev,
				 "SI support provided by radeon.\n");
			dev_info(dev->dev,
				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
				);
			return -ENODEV;
		}
	}
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	if (!amdgpu_cik_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_KAVERI:
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
		case CHIP_KABINI:
		case CHIP_MULLINS:
			dev_info(dev->dev,
				 "CIK support provided by radeon.\n");
			dev_info(dev->dev,
				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
				);
			return -ENODEV;
		}
	}
#endif

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)adev;

	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = amdgpu_acpi_init(adev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	amdgpu_amdkfd_device_probe(adev);
	amdgpu_amdkfd_device_init(adev);

	if (amdgpu_device_is_px(dev)) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && amdgpu_device_is_px(dev))
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->mc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
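
/* Note: amdgpu_firmware_info() backs both the AMDGPU_INFO_FW_VERSION query
 * below and the amdgpu_firmware_info debugfs file at the end of this file.
 */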
/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
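/* Illustrative sketch (not part of this driver): roughly how a userspace
 * client invokes this ioctl through libdrm, assuming an open device fd.
 * Shown only to document the return_pointer/return_size contract the
 * handler below relies on.
 *
 *	struct drm_amdgpu_info request = {};
 *	uint32_t accel = 0;
 *
 *	request.query = AMDGPU_INFO_ACCEL_WORKING;
 *	request.return_pointer = (uintptr_t)&accel;
 *	request.return_size = sizeof(accel);
 *	drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
 */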
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;
	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		enum amd_ip_block_type type;
		uint32_t ring_mask = 0;
		uint32_t ib_start_alignment = 0;
		uint32_t ib_size_alignment = 0;

		if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
			return -EINVAL;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_gfx_rings; i++)
				ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 8;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_compute_rings; i++)
				ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 8;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			for (i = 0; i < adev->sdma.num_instances; i++)
				ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			ring_mask = adev->uvd.ring.ready ? 1 : 0;
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 16;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			for (i = 0; i < adev->vce.num_rings; i++)
				ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			for (i = 0; i < adev->uvd.num_enc_rings; i++)
				ring_mask |= ((adev->uvd.ring_enc[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 16;
			break;
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			for (i = 0; i < adev->vcn.num_enc_rings; i++)
				ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 1;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid) {
				ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
				ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
				ip.capabilities_flags = 0;
				ip.available_rings = ring_mask;
				ip.ib_start_alignment = ib_start_alignment;
				ip.ib_size_alignment = ib_size_alignment;
				break;
			}
		}
		return copy_to_user(out, &ip,
				    min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT;
		gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT;
		gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->mc.real_vram_size;
		vram_gtt.vram_size -= adev->vram_pin_size;
		vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
		vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= adev->gart_pin_size;
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->mc.real_vram_size;
		mem.vram.usable_heap_size =
			adev->mc.real_vram_size - adev->vram_pin_size;
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->mc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size =
			adev->mc.visible_vram_size -
			(adev->vram_pin_size - adev->invisible_pin_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size
			- adev->gart_pin_size;
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if the userspace set all bits
		 * in the bitfields
		 */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;

		if (info->read_mmr_reg.count > 128)
			return -EINVAL;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		for (i = 0; i < info->read_mmr_reg.count; i++)
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				return -EFAULT;
			}
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info = {};

		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max =
			min(adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE,
			    AMDGPU_VA_HOLE_START);
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->mc.vram_type;
		dev_info.vram_bit_width = adev->mc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;
		dev_info.gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;

		if (amdgpu_ngg) {
			dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
			dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
			dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
			dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
			dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
			dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
			dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
			dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
		}
		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					min((size_t)size, sizeof(bios_size)))
					? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
					info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
					min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		struct pp_gpu_power query = {0};
		int query_size = sizeof(query);

		if (amdgpu_dpm == 0)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&query, &query_size)) {
				return -EINVAL;
			}
			ui32 = query.average_gpu_power >> 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}
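
/* Note: every query above clamps its copy_to_user() length to the smaller of
 * the caller-supplied return_size and the kernel-side object, so a short
 * user buffer is truncated rather than overrun.
 */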
/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_fbdev_restore_mode(adev);
	vga_switcheroo_process_delayed_switch();
}
/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r;

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	r = amdgpu_vm_init(adev, &fpriv->vm,
			   AMDGPU_VM_CONTEXT_GFX, 0);
	if (r) {
		kfree(fpriv);
		goto out_suspend;
	}

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		amdgpu_vm_fini(adev, &fpriv->vm);
		kfree(fpriv);
		goto out_suspend;
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
		if (r) {
			amdgpu_vm_fini(adev, &fpriv->vm);
			kfree(fpriv);
			goto out_suspend;
		}
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}
/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);

	if (adev->asic_type != CHIP_RAVEN) {
		amdgpu_uvd_free_handles(adev, file_priv);
		amdgpu_vce_free_handles(adev, file_priv);
	}

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	amdgpu_vm_fini(adev, &fpriv->vm);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_free(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
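	/* For example: if the raw hw count reads N while vpos is +5 (five
	 * lines past start of vblank, before vsync), we return N + 1, the
	 * value the hw itself will report once vsync begins.
	 */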
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_get_crtc_scanoutpos to return vpos as
			 * distance to start of vblank, instead of regular
			 * vertical scanout pos.
			 */
			stat = amdgpu_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}
/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}
/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}
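
/* DRM_AUTH entries require an authenticated client on the primary node;
 * DRM_RENDER_ALLOW additionally exposes the ioctl on render nodes, and
 * AMDGPU_SCHED is restricted to the DRM master.
 */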
const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	int ret, i;

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->asic_type == CHIP_KAVERI ||
	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	return 0;
}
static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}