/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#if defined(CONFIG_VGA_SWITCHEROO)
bool amdgpu_has_atpx(void);
#else
static inline bool amdgpu_has_atpx(void) { return false; }
#endif
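
/* ATPX is the ACPI interface used on PowerXpress/hybrid laptops to power
 * the discrete GPU up and down; without vga_switcheroo support we simply
 * report that it is absent. */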

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * Returns 0 on success.
 */
int amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return 0;

	if (adev->rmmio == NULL)
		goto done_free;

	pm_runtime_get_sync(dev->dev);

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
	return 0;
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)adev;

	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    ((flags & AMDGPU_IS_APU) == 0))
		flags |= AMDGPU_IS_PX;

	/* amdgpu_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = amdgpu_acpi_init(adev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}
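
	/* With the setup above, a PX dGPU is allowed to runtime-suspend once
	 * it has been idle for the 5 second autosuspend delay. */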

out:
	if (r)
		amdgpu_driver_unload_kms(dev);

	return r;
}

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @adev: amdgpu device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline params, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(long)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		enum amd_ip_block_type type;
		uint32_t ring_mask = 0;
		uint32_t ib_start_alignment = 0;
		uint32_t ib_size_alignment = 0;

		if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
			return -EINVAL;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_gfx_rings; i++)
				ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 8;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_compute_rings; i++)
				ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 8;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			ring_mask = adev->sdma[0].ring.ready ? 1 : 0;
			ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			ring_mask = adev->uvd.ring.ready ? 1 : 0;
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 8;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++)
				ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 8;
			break;
		default:
			return -EINVAL;
		}
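
		/* Report the ring availability mask together with the version
		 * of the first valid IP block of the requested type. */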
		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].type == type &&
			    adev->ip_block_status[i].valid) {
				ip.hw_ip_version_major = adev->ip_blocks[i].major;
				ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
				ip.capabilities_flags = 0;
				ip.available_rings = ring_mask;
				ip.ib_start_alignment = ib_start_alignment;
				ip.ib_size_alignment = ib_size_alignment;
				break;
			}
		}
		return copy_to_user(out, &ip,
				    min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		default:
			return -EINVAL;
		}
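
		/* Count the valid instances of the requested block type,
		 * capped at AMDGPU_HW_IP_INSTANCE_MAX_COUNT. */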
		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].type == type &&
			    adev->ip_block_status[i].valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_asic_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		switch (info->query_fw.fw_type) {
		case AMDGPU_INFO_FW_VCE:
			fw_info.ver = adev->vce.fw_version;
			fw_info.feature = adev->vce.fb_version;
			break;
		case AMDGPU_INFO_FW_UVD:
			fw_info.ver = 0;
			fw_info.feature = 0;
			break;
		case AMDGPU_INFO_FW_GMC:
			fw_info.ver = adev->mc.fw_version;
			fw_info.feature = 0;
			break;
		case AMDGPU_INFO_FW_GFX_ME:
			fw_info.ver = adev->gfx.me_fw_version;
			fw_info.feature = adev->gfx.me_feature_version;
			break;
		case AMDGPU_INFO_FW_GFX_PFP:
			fw_info.ver = adev->gfx.pfp_fw_version;
			fw_info.feature = adev->gfx.pfp_feature_version;
			break;
		case AMDGPU_INFO_FW_GFX_CE:
			fw_info.ver = adev->gfx.ce_fw_version;
			fw_info.feature = adev->gfx.ce_feature_version;
			break;
		case AMDGPU_INFO_FW_GFX_RLC:
			fw_info.ver = adev->gfx.rlc_fw_version;
			fw_info.feature = adev->gfx.rlc_feature_version;
			break;
		case AMDGPU_INFO_FW_GFX_MEC:
			if (info->query_fw.index == 0) {
				fw_info.ver = adev->gfx.mec_fw_version;
				fw_info.feature = adev->gfx.mec_feature_version;
			} else if (info->query_fw.index == 1) {
				fw_info.ver = adev->gfx.mec2_fw_version;
				fw_info.feature = adev->gfx.mec2_feature_version;
			} else
				return -EINVAL;
			break;
		case AMDGPU_INFO_FW_SMC:
			fw_info.ver = adev->pm.fw_version;
			fw_info.feature = 0;
			break;
		case AMDGPU_INFO_FW_SDMA:
			if (info->query_fw.index >= 2)
				return -EINVAL;
			fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
			fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
			break;
		default:
			return -EINVAL;
		}
		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = atomic64_read(&adev->vram_usage);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = atomic64_read(&adev->vram_vis_usage);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = atomic64_read(&adev->gtt_usage);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
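
	/* The four usage queries above read lockless 64-bit counters that the
	 * memory management paths keep updated with atomic64 operations. */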
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT;
		gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT;
		gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->mc.real_vram_size;
		vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
		vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size;
		vram_gtt.gtt_size = adev->mc.gtt_size;
		vram_gtt.gtt_size -= adev->gart_pin_size;
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size = info->read_mmr_reg.count * 4;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if the userspace set all bits
		 * in the bitfields */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;
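
		/* An all-ones se_num/sh_num asks the asic-specific register
		 * read helper to use GRBM broadcast rather than selecting a
		 * single shader engine/array. */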

		regs = kmalloc(alloc_size, GFP_KERNEL);
		if (!regs)
			return -ENOMEM;

		for (i = 0; i < info->read_mmr_reg.count; i++)
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				return -EFAULT;
			}
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info = {};
		struct amdgpu_cu_info cu_info;

		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock =
				adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
			dev_info.max_memory_clock =
				adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk * 10;
		} else {
			dev_info.max_engine_clock = adev->pm.default_sclk * 10;
			dev_info.max_memory_clock = adev->pm.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
					adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info._pad = 0;
		dev_info.ids_flags = 0;
		if (adev->flags & AMDGPU_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		dev_info.virtual_address_alignment = max(PAGE_SIZE, 0x10000UL);
		dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) *
					     AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;

		amdgpu_asic_get_cu_info(adev, &cu_info);
		dev_info.cu_active_number = cu_info.number;
		dev_info.cu_ao_mask = cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
		dev_info.vram_type = adev->mc.vram_type;
		dev_info.vram_bit_width = adev->mc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
469 DRM_DEBUG_KMS("Invalid request %d\n", info
->query
);
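
/* A minimal userspace sketch of this query path (not part of the driver;
 * assumes libdrm's drmCommandWrite() from xf86drm.h and the structs from
 * amdgpu_drm.h, with error handling omitted):
 *
 *	struct drm_amdgpu_info request = {};
 *	uint64_t gpu_counter = 0;
 *
 *	request.return_pointer = (uintptr_t)&gpu_counter;
 *	request.return_size = sizeof(gpu_counter);
 *	request.query = AMDGPU_INFO_TIMESTAMP;
 *	if (drmCommandWrite(fd, DRM_AMDGPU_INFO, &request,
 *			    sizeof(request)) == 0)
 *		printf("GPU clock counter: %llu\n",
 *		       (unsigned long long)gpu_counter);
 */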

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r;

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv))
		return -ENOMEM;

	r = amdgpu_vm_init(adev, &fpriv->vm);
	if (r)
		goto error_free;

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	/* init context manager */
	mutex_init(&fpriv->ctx_mgr.lock);
	idr_init(&fpriv->ctx_mgr.ctx_handles);
	fpriv->ctx_mgr.adev = adev;

	file_priv->driver_priv = fpriv;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return 0;

error_free:
	kfree(fpriv);

	return r;
}
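
/* Everything hanging off file_priv->driver_priv (the VM plus the bo_list and
 * context idrs) is per-open-file state; amdgpu_driver_postclose_kms() below
 * tears it all down again. */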

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	int handle;

	if (!fpriv)
		return;

	amdgpu_vm_fini(adev, &fpriv->vm);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_free(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	/* release context */
	amdgpu_ctx_fini(fpriv);

	kfree(fpriv);
	file_priv->driver_priv = NULL;
}

/**
 * amdgpu_driver_preclose_kms - drm callback for pre close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx
 * (all asics).
 */
void amdgpu_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_uvd_free_handles(adev, file_priv);
	amdgpu_vce_free_handles(adev, file_priv);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	return amdgpu_display_vblank_get_counter(adev, crtc);
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}
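
/* The three callbacks above back the DRM core's get_vblank_counter,
 * enable_vblank and disable_vblank hooks, which it uses to maintain its
 * per-crtc vblank bookkeeping. */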

/**
 * amdgpu_get_vblank_timestamp_kms - get vblank timestamp
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the timestamp for
 * @max_error: max error
 * @vblank_time: time value
 * @flags: flags passed to the driver
 *
 * Gets the timestamp on the requested crtc based on the
 * scanout position. (all asics).
 * Returns positive status flags on success, negative error on failure.
 */
int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags)
{
	struct drm_crtc *drmcrtc;
	struct amdgpu_device *adev = dev->dev_private;

	if (crtc < 0 || crtc >= dev->num_crtcs) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	/* Get associated drm_crtc: */
	drmcrtc = &adev->mode_info.crtcs[crtc]->base;

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags,
						     drmcrtc, &drmcrtc->hwmode);
}

const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
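
/* This table is plugged into the drm_driver ->ioctls/->num_ioctls fields when
 * the driver registers (see amdgpu_drv.c); userspace typically reaches these
 * entry points through libdrm_amdgpu wrappers rather than raw ioctl() calls. */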