/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"
#include "si_enums.h"

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);

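/*
 * DCE6 instantiates one copy of the CRTC, HPD and DIG register blocks per
 * display controller.  The offset tables below give each instance's
 * distance from instance 0, so register X of controller n is addressed as
 * mmX + crtc_offsets[n] (and likewise for hpd_offsets and dig_offsets).
 */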
static const u32 crtc_offsets[6] =
{
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
};

static const uint32_t dig_offsets[] = {
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

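/*
 * Interrupt status decoding table: entry i names the DISP_INTERRUPT_STATUS*
 * register that carries CRTC i's status and the vblank/vline/HPD bits
 * within it (D1 lives in the base register, D2..D6 in the CONTINUE*
 * registers).
 */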
static const struct {
	uint32_t	reg;
	uint32_t	vblank;
	uint32_t	vline;
	uint32_t	hpd;
} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	DRM_INFO("xxxx: dce_v6_0_audio_endpt_rreg ----no impl!!!!\n");
	return 0;
}

static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	DRM_INFO("xxxx: dce_v6_0_audio_endpt_wreg ----no impl!!!!\n");
}

static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & CRTC_STATUS__CRTC_V_BLANK_MASK)
		return true;
	else
		return false;
}

static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	if (pos1 != pos2)
		return true;
	else
		return false;
}

/**
 * dce_v6_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	unsigned i = 100;

	if (crtc >= adev->mode_info.num_crtc)
		return;

	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce_v6_0_is_in_vblank(adev, crtc)) {
		if (i++ == 100) {
			i = 0;
			if (!dce_v6_0_is_counter_moving(adev, crtc))
				break;
		}
	}

	while (!dce_v6_0_is_in_vblank(adev, crtc)) {
		if (i++ == 100) {
			i = 0;
			if (!dce_v6_0_is_counter_moving(adev, crtc))
				break;
		}
	}
}

static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v6_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Does the actual pageflip (evergreen+).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
 * Returns the current update pending status.
 */
static void dce_v6_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	/* flip at hsync for async, default is vsync */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
	/* update the scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);

	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

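/*
 * Note: the two raw register snapshots returned above are decoded into
 * scanline/vblank positions by the shared amdgpu display code; this
 * callback only reads CRTC_V_BLANK_START_END and CRTC_STATUS_POSITION
 * for the given crtc.
 */
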
/**
 * dce_v6_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	if (hpd >= adev->mode_info.num_hpd)
		return connected;

	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
		connected = true;

	return connected;
}

/**
 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v6_0_hpd_sense(adev, hpd);

	if (hpd >= adev->mode_info.num_hpd)
		return;

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	else
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**
 * dce_v6_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS avoid breaking the
			 * aux dp channel on imac and help (but not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * also avoid interrupt storms during dpms.
			 */
			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

/**
 * dce_v6_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static u32 evergreen_get_vblank_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

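/*
 * While the memory controller is being reprogrammed, scanout must not touch
 * VRAM: dce_v6_0_stop_mc_access() below blanks every enabled CRTC (waiting
 * a full frame so the double buffered update lands), disables the VGA
 * render path, and saves enough state for dce_v6_0_resume_mc_access() to
 * restore scanout afterwards.
 */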
static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
				    struct amdgpu_mode_mc_save *save)
{
	u32 crtc_enabled, tmp, frame_count;
	int i, j;

	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);

	/* disable VGA render */
	WREG32(mmVGA_RENDER_CONTROL, 0);

	/* blank the display controllers */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK;
		if (crtc_enabled) {
			save->crtc_enabled[i] = true;
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);

			if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) {
				dce_v6_0_vblank_wait(adev, i);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK;
				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = evergreen_get_vblank_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (evergreen_get_vblank_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}

			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
			tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
		} else {
			save->crtc_enabled[i] = false;
		}
	}
}

static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
				      struct amdgpu_mode_mc_save *save)
{
	u32 tmp;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);
	}

	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
	WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);

	/* unlock regs and wait for update */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
			if ((tmp & 0x7) != 0) {
				tmp &= ~0x7;
				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
			if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) {
				tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (tmp & 0x1) {
				tmp &= ~0x1;
				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
			for (j = 0; j < adev->usec_timeout; j++) {
				tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
				if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0)
					break;
				udelay(1);
			}
		}
	}

	/* Unlock vga access */
	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}

static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	if (!render)
		WREG32(mmVGA_RENDER_CONTROL,
		       RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
}

static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
{
	int num_crtc = 0;

	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		num_crtc = 6;
		break;
	case CHIP_OLAND:
		num_crtc = 2;
		break;
	default:
		num_crtc = 0;
	}
	return num_crtc;
}

void dce_v6_0_disable_dce(struct amdgpu_device *adev)
{
	/*Disable VGA render and enabled crtc, if has DCE engine*/
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v6_0_set_vga_render_state(adev, false);

		/*Disable crtc*/
		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
			if (crtc_enabled) {
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}

static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
		else
			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/**
 * si_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (SI).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

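/*
 * Display watermark setup.  dce6_wm_params below gathers everything the
 * bandwidth formulas need for one head; the helpers that follow all
 * return bandwidths in MBytes/s and use the 20.12 fixed point helpers
 * (dfixed_*) to carry fractional efficiency factors such as 0.7 or 0.8.
 */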
struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;    /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};

/**
 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

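/*
 * Illustrative numbers (not taken from real hardware): with wm->yclk =
 * 1000000 and 2 dram channels, the math above works out to
 * (1000000 / 1000) * (2 * 4) * 0.7 = 5600 MBytes/s of raw DRAM bandwidth.
 */
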
/**
 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v6_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

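/*
 * In other words: average bandwidth = src_width * bytes_per_pixel * vsc /
 * line_time -- the bytes a head must fetch per line, divided by the time
 * (active + blank, in ns) it has to fetch them.
 */
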
/**
 * dce_v6_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}

/**
 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

/**
 * dce_v6_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce6_wm_params wm_low, wm_high;
	u32 dram_channels;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		dram_channels = si_get_number_of_dram_channels(adev);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = dram_channels;
		wm_high.num_heads = num_heads;

		if (adev->pm.dpm_enabled) {
		/* watermark for low clocks */
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = dram_channels;
		wm_low.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
		/* set for low clocks */
		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v6_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v6_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
}

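/*
 * Priority mark derivation used above: the latency watermark (in ns) is
 * converted to a pixel count using the pixel clock (mode->clock is in kHz,
 * hence the dfixed divides by 1000) and the horizontal scale ratio, then
 * divided by 16 because the PRIORITY_A/B_CNT registers count in 16 pixel
 * units.
 */
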
/* watermark setup */
static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode,
				       struct drm_display_mode *other_mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The paritioning is done via one of four
	 * preset allocations specified in bits 21:20:
	 *  0 - half lb
	 *  2 - whole lb, other crtc must be disabled
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs.  Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (other_mode) {
			tmp = 0; /* 1/2 */
			buffer_alloc = 1;
		} else {
			tmp = 2; /* whole */
			buffer_alloc = 2;
		}
	} else {
		tmp = 0;
		buffer_alloc = 0;
	}

	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
	       DC_LB_MEMORY_CONFIG(tmp));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 2:
			return 8192 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

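/*
 * With the split programmed above, a head whose partner is also active gets
 * half the shared line buffer (4096 * 2 entries) while a head with a
 * disabled partner gets the whole buffer (8192 * 2 entries); the returned
 * size feeds directly into dce_v6_0_program_watermarks().
 */
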
/**
 * dce_v6_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	if (!adev->mode_info.mode_config_initialized)
		return;

	amdgpu_update_display_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
		mode0 = &adev->mode_info.crtcs[i]->base.mode;
		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 offset, tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
					 AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v6_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->offset;

	WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
	       AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
}

static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	DRM_INFO("xxxx: dce_v6_0_audio_write_latency_fields---no imp!!!!!\n");
}

static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	DRM_INFO("xxxx: dce_v6_0_audio_write_speaker_allocation---no imp!!!!!\n");
}

static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	DRM_INFO("xxxx: dce_v6_0_audio_write_sad_regs---no imp!!!!!\n");
}

static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	DRM_INFO("xxxx: dce_v6_0_audio_enable---no imp!!!!!\n");
}

static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};

static int dce_v6_0_audio_init(struct amdgpu_device *adev)
{
	return 0;
}

static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{

}

static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	DRM_INFO("xxxx: dce_v6_0_afmt_update_ACR---no imp!!!!!\n");
}

/*
 * build a HDMI Video Info Frame
 */
static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
					       void *buffer, size_t size)
{
	DRM_INFO("xxxx: dce_v6_0_afmt_update_avi_infoframe---no imp!!!!!\n");
}

static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	DRM_INFO("xxxx: dce_v6_0_audio_set_dto---no imp!!!!!\n");
}

/*
 * update the info frames with the data from the current display mode
 */
static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	DRM_INFO("xxxx: dce_v6_0_afmt_setmode ----no impl !!!!!!!!\n");
}

static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (enable && dig->afmt->enabled)
		return;
	if (!enable && !dig->afmt->enabled)
		return;

	if (!enable && dig->afmt->pin) {
		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
		dig->afmt->pin = NULL;
	}

	dig->afmt->enabled = enable;

	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}

static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->mode_info.num_dig; i++)
		adev->mode_info.afmt[i] = NULL;

	/* DCE6 has audio blocks tied to DIG encoders */
	for (i = 0; i < adev->mode_info.num_dig; i++) {
		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
		if (adev->mode_info.afmt[i]) {
			adev->mode_info.afmt[i]->offset = dig_offsets[i];
			adev->mode_info.afmt[i]->id = i;
		} else {
			for (j = 0; j < i; j++) {
				kfree(adev->mode_info.afmt[j]);
				adev->mode_info.afmt[j] = NULL;
			}
			DRM_ERROR("Out of memory allocating afmt table\n");
			return -ENOMEM;
		}
	}
	return 0;
}

static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->mode_info.num_dig; i++) {
		kfree(adev->mode_info.afmt[i]);
		adev->mode_info.afmt[i] = NULL;
	}
}

static const u32 vga_control_regs[6] =
{
	mmD1VGA_CONTROL,
	mmD2VGA_CONTROL,
	mmD3VGA_CONTROL,
	mmD4VGA_CONTROL,
	mmD5VGA_CONTROL,
	mmD6VGA_CONTROL,
};

static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 vga_control;

	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
}

static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;

	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
}

static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y, int atomic)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_framebuffer *amdgpu_fb;
	struct drm_framebuffer *target_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *abo;
	uint64_t fb_location, tiling_flags;
	uint32_t fb_format, fb_pitch_pixels, pipe_config;
	u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
	u32 viewport_w, viewport_h;
	int r;
	bool bypass_lut = false;
	struct drm_format_name_buf format_name;

	/* no fb bound */
	if (!atomic && !crtc->primary->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	if (atomic) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		target_fb = fb;
	} else {
		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		target_fb = crtc->primary->fb;
	}

	/* If atomic, assume fb object is pinned & idle & fenced and
	 * just update base pointers
	 */
	obj = amdgpu_fb->obj;
	abo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(abo, false);
	if (unlikely(r != 0))
		return r;

	if (atomic) {
		fb_location = amdgpu_bo_gpu_offset(abo);
	} else {
		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
		if (unlikely(r != 0)) {
			amdgpu_bo_unreserve(abo);
			return -EINVAL;
		}
	}

	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
	amdgpu_bo_unreserve(abo);

	switch (target_fb->format->format) {
	case DRM_FORMAT_C8:
		fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
			     GRPH_FORMAT(GRPH_FORMAT_INDEXED));
		break;
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_ARGB4444:
		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
			     GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
#ifdef __BIG_ENDIAN
		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
			     GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
#ifdef __BIG_ENDIAN
		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_BGRA5551:
		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
			     GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
#ifdef __BIG_ENDIAN
		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_RGB565:
		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
			     GRPH_FORMAT(GRPH_FORMAT_ARGB565));
#ifdef __BIG_ENDIAN
		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
#ifdef __BIG_ENDIAN
		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
#endif
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
			     GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
#ifdef __BIG_ENDIAN
		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRA1010102:
		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
			     GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
#ifdef __BIG_ENDIAN
		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(target_fb->format->format, &format_name));
		return -EINVAL;
	}

	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
		unsigned bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		fb_format |= GRPH_NUM_BANKS(num_banks);
		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
		fb_format |= GRPH_TILE_SPLIT(tile_split);
		fb_format |= GRPH_BANK_WIDTH(bankw);
		fb_format |= GRPH_BANK_HEIGHT(bankh);
		fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
	}

	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
	fb_format |= GRPH_PIPE_CONFIG(pipe_config);

	dce_v6_0_vga_enable(crtc, false);

	/* Make sure surface address is updated at vertical blank rather than
	 * horizontal blank
	 */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);

	/*
	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
	 * retain the full precision throughout the pipeline.
	 */
	WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
		 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
		 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);

	if (bypass_lut)
		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");

	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);

	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);

	dce_v6_0_grph_enable(crtc, true);

	WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
	       target_fb->height);
	x &= ~3;
	y &= ~1;
	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
	       (x << 16) | y);
	viewport_w = crtc->mode.hdisplay;
	viewport_h = (crtc->mode.vdisplay + 1) & ~1;

	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
	       (viewport_w << 16) | viewport_h);

	/* set pageflip to happen anywhere in vblank interval */
	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);

	if (!atomic && fb && fb != crtc->primary->fb) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(abo, false);
		if (unlikely(r != 0))
			return r;
		amdgpu_bo_unpin(abo);
		amdgpu_bo_unreserve(abo);
	}

	/* Bytes per pixel may have changed */
	dce_v6_0_bandwidth_update(adev);

	return 0;
}

static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
				    struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
		       INTERLEAVE_EN);
	else
		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
}

static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);

	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
	       ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
		(0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
		(0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));

	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);

	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);

	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
	for (i = 0; i < 256; i++) {
		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
		       (amdgpu_crtc->lut_r[i] << 20) |
		       (amdgpu_crtc->lut_g[i] << 10) |
		       (amdgpu_crtc->lut_b[i] << 0));
	}

	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
		(0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
		ICON_DEGAMMA_MODE(0) |
		(0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
	       ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
		(0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
		(0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
	       ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
		(0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
}

static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
		return dig->linkb ? 1 : 0;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
		return dig->linkb ? 3 : 2;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		return dig->linkb ? 5 : 4;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		return 6;
	default:
		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
		return 0;
	}
}

/**
 * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
 * monitors a dedicated PPLL must be used.  If a particular board has
 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
 * as there is no need to program the PLL itself.  If we are not able to
 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
 * avoid messing up an existing monitor.
 *
 */
static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 pll_in_use;
	int pll;

	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
		if (adev->clock.dp_extclk)
			/* skip PPLL programming if using ext clock */
			return ATOM_PPLL_INVALID;
		else
			return ATOM_PPLL0;
	} else {
		/* use the same PPLL for all monitors with the same clock */
		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
		if (pll != ATOM_PPLL_INVALID)
			return pll;
	}

	/*  PPLL1, and PPLL2 */
	pll_in_use = amdgpu_pll_get_use_mask(crtc);
	if (!(pll_in_use & (1 << ATOM_PPLL2)))
		return ATOM_PPLL2;
	if (!(pll_in_use & (1 << ATOM_PPLL1)))
		return ATOM_PPLL1;
	DRM_ERROR("unable to allocate a PPLL\n");
	return ATOM_PPLL_INVALID;
}

static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint32_t cur_lock;

	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
	if (lock)
		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
	else
		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
}

static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}

static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(amdgpu_crtc->cursor_addr));
	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(amdgpu_crtc->cursor_addr));

	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
		   CUR_CONTROL__CURSOR_EN_MASK |
		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}

static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
				       int x, int y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int xorigin = 0, yorigin = 0;

	amdgpu_crtc->cursor_x = x;
	amdgpu_crtc->cursor_y = y;

	/* avivo cursor are offset into the total surface */
	x += crtc->x;
	y += crtc->y;
	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}

	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);

	return 0;
}

static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
				     int x, int y)
{
	int ret;

	dce_v6_0_lock_cursor(crtc, true);
	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
	dce_v6_0_lock_cursor(crtc, false);

	return ret;
}

static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
				     struct drm_file *file_priv,
				     uint32_t handle,
				     uint32_t width,
				     uint32_t height,
				     int32_t hot_x,
				     int32_t hot_y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct drm_gem_object *obj;
	struct amdgpu_bo *aobj;
	int ret;

	if (!handle) {
		/* turn off cursor */
		dce_v6_0_hide_cursor(crtc);
		obj = NULL;
		goto unpin;
	}

	if ((width > amdgpu_crtc->max_cursor_width) ||
	    (height > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
		return -ENOENT;
	}

	aobj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(aobj, false);
	if (ret != 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
	amdgpu_bo_unreserve(aobj);
	if (ret) {
		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	dce_v6_0_lock_cursor(crtc, true);

	if (width != amdgpu_crtc->cursor_width ||
	    height != amdgpu_crtc->cursor_height ||
	    hot_x != amdgpu_crtc->cursor_hot_x ||
	    hot_y != amdgpu_crtc->cursor_hot_y) {
		int x, y;

		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;

		dce_v6_0_cursor_move_locked(crtc, x, y);

		amdgpu_crtc->cursor_width = width;
		amdgpu_crtc->cursor_height = height;
		amdgpu_crtc->cursor_hot_x = hot_x;
		amdgpu_crtc->cursor_hot_y = hot_y;
	}

	if (width != amdgpu_crtc->cursor_width ||
	    height != amdgpu_crtc->cursor_height) {
		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
		       (width - 1) << 16 | (height - 1));
		amdgpu_crtc->cursor_width = width;
		amdgpu_crtc->cursor_height = height;
	}

	dce_v6_0_show_cursor(crtc);
	dce_v6_0_lock_cursor(crtc, false);

unpin:
	if (amdgpu_crtc->cursor_bo) {
		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
		ret = amdgpu_bo_reserve(aobj, false);
		if (likely(ret == 0)) {
			amdgpu_bo_unpin(aobj);
			amdgpu_bo_unreserve(aobj);
		}
		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
	}

	amdgpu_crtc->cursor_bo = obj;
	return 0;
}

static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	if (amdgpu_crtc->cursor_bo) {
		dce_v6_0_lock_cursor(crtc, true);

		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
					    amdgpu_crtc->cursor_y);

		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
		       (amdgpu_crtc->cursor_width - 1) << 16 |
		       (amdgpu_crtc->cursor_height - 1));

		dce_v6_0_show_cursor(crtc);
		dce_v6_0_lock_cursor(crtc, false);
	}
}

static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				   u16 *blue, uint32_t size)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int i;

	/* userspace palettes are always correct as is */
	for (i = 0; i < size; i++) {
		amdgpu_crtc->lut_r[i] = red[i] >> 6;
		amdgpu_crtc->lut_g[i] = green[i] >> 6;
		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
	}
	dce_v6_0_crtc_load_lut(crtc);

	return 0;
}

static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(amdgpu_crtc);
}

static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
	.cursor_move = dce_v6_0_crtc_cursor_move,
	.gamma_set = dce_v6_0_crtc_gamma_set,
	.set_config = amdgpu_crtc_set_config,
	.destroy = dce_v6_0_crtc_destroy,
	.page_flip_target = amdgpu_crtc_page_flip_target,
};

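/*
 * Legacy DPMS handling: DPMS_ON enables and unblanks the crtc and
 * re-arms the vblank and pageflip interrupt state; every other mode
 * blanks and disables it.  amdgpu_pm_compute_clocks() runs on each
 * transition so power management tracks the new display state.
 */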
static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	unsigned type;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		amdgpu_crtc->enabled = true;
		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
		/* Make sure VBLANK and PFLIP interrupts are still enabled */
		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
		amdgpu_irq_update(adev, &adev->crtc_irq, type);
		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
		drm_crtc_vblank_on(crtc);
		dce_v6_0_crtc_load_lut(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_crtc_vblank_off(crtc);
		if (amdgpu_crtc->enabled)
			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
		amdgpu_crtc->enabled = false;
		break;
	}
	/* adjust pm to dpms */
	amdgpu_pm_compute_clocks(adev);
}

static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
{
	/* disable crtc pair power gating before programming */
	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
{
	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}

static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_atom_ss ss;
	int i;

	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	if (crtc->primary->fb) {
		int r;
		struct amdgpu_framebuffer *amdgpu_fb;
		struct amdgpu_bo *abo;

		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(abo, false);
		if (unlikely(r))
			DRM_ERROR("failed to reserve abo before unpin\n");
		else {
			amdgpu_bo_unpin(abo);
			amdgpu_bo_unreserve(abo);
		}
	}
	/* disable the GRPH */
	dce_v6_0_grph_enable(crtc, false);

	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i] &&
		    adev->mode_info.crtcs[i]->enabled &&
		    i != amdgpu_crtc->crtc_id &&
		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* one other crtc is using this pll don't turn
			 * off the pll
			 */
			goto done;
		}
	}

	switch (amdgpu_crtc->pll_id) {
	case ATOM_PPLL1:
	case ATOM_PPLL2:
		/* disable the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	default:
		break;
	}
done:
	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
}

static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y, struct drm_framebuffer *old_fb)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (!amdgpu_crtc->adjusted_clock)
		return -EINVAL;

	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
	amdgpu_atombios_crtc_scaler_setup(crtc);
	dce_v6_0_cursor_reset(crtc);
	/* update the hw version for dpm */
	amdgpu_crtc->hw_mode = *adjusted_mode;

	return 0;
}

static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			amdgpu_crtc->encoder = encoder;
			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
			break;
		}
	}
	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
		amdgpu_crtc->encoder = NULL;
		amdgpu_crtc->connector = NULL;
		return false;
	}
	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
		return false;
	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
		return false;
	/* pick pll */
	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
	/* if we can't get a PPLL for a non-DP encoder, fail */
	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
		return false;

	return true;
}

static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb)
{
	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}

static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y, enum mode_set_atomic state)
{
	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
}

static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
	.dpms = dce_v6_0_crtc_dpms,
	.mode_fixup = dce_v6_0_crtc_mode_fixup,
	.mode_set = dce_v6_0_crtc_mode_set,
	.mode_set_base = dce_v6_0_crtc_set_base,
	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
	.prepare = dce_v6_0_crtc_prepare,
	.commit = dce_v6_0_crtc_commit,
	.load_lut = dce_v6_0_crtc_load_lut,
	.disable = dce_v6_0_crtc_disable,
};

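/*
 * One amdgpu_crtc is allocated per hardware crtc, with room for
 * AMDGPUFB_CONN_LIMIT connector pointers carved out of the same
 * allocation.  The default gamma LUT is an identity ramp; i << 2
 * widens the 8-bit index to the 10-bit hardware LUT entries.
 */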
static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
{
	struct amdgpu_crtc *amdgpu_crtc;
	int i;

	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (amdgpu_crtc == NULL)
		return -ENOMEM;

	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
	amdgpu_crtc->crtc_id = index;
	adev->mode_info.crtcs[index] = amdgpu_crtc;

	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;

	for (i = 0; i < 256; i++) {
		amdgpu_crtc->lut_r[i] = i << 2;
		amdgpu_crtc->lut_g[i] = i << 2;
		amdgpu_crtc->lut_b[i] = i << 2;
	}

	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];

	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);

	return 0;
}

static int dce_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;

	dce_v6_0_set_display_funcs(adev);
	dce_v6_0_set_irq_funcs(adev);

	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);

	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

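/*
 * The IRQ source ids registered below follow the SI interrupt map:
 * crtc vblank/vline sources are 1..num_crtc, pageflip sources are
 * 8, 10, ..., 18 (one per crtc), and 42 is the hotplug (HPD) source.
 */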
static int dce_v6_0_sw_init(void *handle)
{
	int r, i;
	bool ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
		if (r)
			return r;
	}

	for (i = 8; i < 20; i += 2) {
		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
		if (r)
			return r;
	}

	/* HPD hotplug */
	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
	if (r)
		return r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
	adev->ddev->mode_config.async_page_flip = true;
	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;
	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	/* allocate crtcs */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = dce_v6_0_crtc_init(adev, i);
		if (r)
			return r;
	}

	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
	if (ret)
		amdgpu_print_display_setup(adev->ddev);
	else
		return -EINVAL;

	/* setup afmt */
	r = dce_v6_0_afmt_init(adev);
	if (r)
		return r;

	r = dce_v6_0_audio_init(adev);
	if (r)
		return r;

	drm_kms_helper_poll_init(adev->ddev);

	return r;
}

static int dce_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->mode_info.bios_hardcoded_edid);

	drm_kms_helper_poll_fini(adev->ddev);

	dce_v6_0_audio_fini(adev);
	dce_v6_0_afmt_fini(adev);

	drm_mode_config_cleanup(adev->ddev);
	adev->mode_info.mode_config_initialized = false;

	return 0;
}

static int dce_v6_0_hw_init(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* init dig PHYs, disp eng pll */
	amdgpu_atombios_encoder_init_dig(adev);
	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);

	/* initialize hpd */
	dce_v6_0_hpd_init(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	dce_v6_0_pageflip_interrupt_init(adev);

	return 0;
}

static int dce_v6_0_hw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dce_v6_0_hpd_fini(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	dce_v6_0_pageflip_interrupt_fini(adev);

	return 0;
}

static int dce_v6_0_suspend(void *handle)
{
	return dce_v6_0_hw_fini(handle);
}

static int dce_v6_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = dce_v6_0_hw_init(handle);

	/* turn on the BL */
	if (adev->mode_info.bl_encoder) {
		u8 bl_level = amdgpu_display_backlight_get_level(adev,
								 adev->mode_info.bl_encoder);
		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
						   bl_level);
	}

	return ret;
}

static bool dce_v6_0_is_idle(void *handle)
{
	return true;
}

static int dce_v6_0_wait_for_idle(void *handle)
{
	return 0;
}

static int dce_v6_0_soft_reset(void *handle)
{
	DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
	return 0;
}

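/*
 * The per-crtc vblank interrupt enable lives in the INT_MASK register
 * of each crtc's register block, so the helper below first resolves
 * the crtc index to its SI_CRTCn_REGISTER_OFFSET before toggling
 * VBLANK_INT_MASK.
 */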
static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
						     int crtc,
						     enum amdgpu_interrupt_state state)
{
	u32 reg_block, interrupt_mask;

	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (crtc) {
	case 0:
		reg_block = SI_CRTC0_REGISTER_OFFSET;
		break;
	case 1:
		reg_block = SI_CRTC1_REGISTER_OFFSET;
		break;
	case 2:
		reg_block = SI_CRTC2_REGISTER_OFFSET;
		break;
	case 3:
		reg_block = SI_CRTC3_REGISTER_OFFSET;
		break;
	case 4:
		reg_block = SI_CRTC4_REGISTER_OFFSET;
		break;
	case 5:
		reg_block = SI_CRTC5_REGISTER_OFFSET;
		break;
	default:
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		interrupt_mask = RREG32(mmINT_MASK + reg_block);
		interrupt_mask &= ~VBLANK_INT_MASK;
		WREG32(mmINT_MASK + reg_block, interrupt_mask);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		interrupt_mask = RREG32(mmINT_MASK + reg_block);
		interrupt_mask |= VBLANK_INT_MASK;
		WREG32(mmINT_MASK + reg_block, interrupt_mask);
		break;
	default:
		break;
	}
}

static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
						    int crtc,
						    enum amdgpu_interrupt_state state)
{

}

static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	u32 dc_hpd_int_cntl;

	if (type >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hpd %d\n", type);
		return 0;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}

static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CRTC_IRQ_VBLANK1:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK2:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK3:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK4:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK5:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK6:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE1:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE2:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE3:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE4:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE5:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE6:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
		break;
	default:
		break;
	}
	return 0;
}

static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	unsigned crtc = entry->src_id - 1;
	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);

	switch (entry->src_data) {
	case 0: /* vblank */
		if (disp_int & interrupt_status_offsets[crtc].vblank)
			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		if (amdgpu_irq_enabled(adev, source, irq_type)) {
			drm_handle_vblank(adev->ddev, crtc);
		}
		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
		break;
	case 1: /* vline */
		if (disp_int & interrupt_status_offsets[crtc].vline)
			WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
		break;
	default:
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
						 struct amdgpu_irq_src *src,
						 unsigned type,
						 enum amdgpu_interrupt_state state)
{
	u32 reg;

	if (type >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", type);
		return -EINVAL;
	}

	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
	if (state == AMDGPU_IRQ_STATE_DISABLE)
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
	else
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);

	return 0;
}

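/*
 * Pageflip sources were registered with ids 8, 10, ..., 18 in
 * dce_v6_0_sw_init(), so (src_id - 8) >> 1 recovers the crtc index
 * (e.g. src_id 12 -> crtc 2).
 */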
static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	unsigned long flags;
	unsigned crtc_id;
	struct amdgpu_crtc *amdgpu_crtc;
	struct amdgpu_flip_work *works;

	crtc_id = (entry->src_id - 8) >> 1;
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	if (crtc_id >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	}

	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);

	/* IRQ could occur when in initial stage */
	if (amdgpu_crtc == NULL)
		return 0;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return 0;
	}

	/* page flip completed. clean up */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wakeup userspace */
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);

	return 0;
}

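/*
 * HPD interrupts are acknowledged by writing the per-pad ACK bit back
 * into DC_HPDx_INT_CONTROL before the shared hotplug worker is
 * scheduled to rescan connectors.
 */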
static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask, tmp;
	unsigned hpd;

	if (entry->src_data >= adev->mode_info.num_hpd) {
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
		return 0;
	}

	hpd = entry->src_data;
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;

	if (disp_int & mask) {
		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
		schedule_work(&adev->hotplug_work);
		DRM_INFO("IH: HPD%d\n", hpd + 1);
	}

	return 0;
}

static int dce_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int dce_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
	.name = "dce_v6_0",
	.early_init = dce_v6_0_early_init,
	.late_init = NULL,
	.sw_init = dce_v6_0_sw_init,
	.sw_fini = dce_v6_0_sw_fini,
	.hw_init = dce_v6_0_hw_init,
	.hw_fini = dce_v6_0_hw_fini,
	.suspend = dce_v6_0_suspend,
	.resume = dce_v6_0_resume,
	.is_idle = dce_v6_0_is_idle,
	.wait_for_idle = dce_v6_0_wait_for_idle,
	.soft_reset = dce_v6_0_soft_reset,
	.set_clockgating_state = dce_v6_0_set_clockgating_state,
	.set_powergating_state = dce_v6_0_set_powergating_state,
};

static void
dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
			  struct drm_display_mode *mode,
			  struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->pixel_clock = adjusted_mode->clock;

	/* need to call this here rather than in prepare() since we need some crtc info */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	/* set scaler clears this on some chips */
	dce_v6_0_set_interleave(encoder->crtc, mode);

	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		dce_v6_0_afmt_enable(encoder, true);
		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
	}
}

static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
		if (dig) {
			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
								    ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v6_0_program_fmt(encoder);
}

static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;

	/* need to call this here as we need the crtc set up */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	amdgpu_atombios_scratch_regs_lock(adev, false);
}

static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig;

	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (amdgpu_atombios_encoder_is_digital(encoder)) {
		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
			dce_v6_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		dig->dig_encoder = -1;
	}
	amdgpu_encoder->active_device = 0;
}

/* these are handled by the primary encoders */
static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
{

}

static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
{

}

static void
dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{

}

static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
{

}

static void
dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
{

}

static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
	.dpms = dce_v6_0_ext_dpms,
	.mode_fixup = dce_v6_0_ext_mode_fixup,
	.prepare = dce_v6_0_ext_prepare,
	.mode_set = dce_v6_0_ext_mode_set,
	.commit = dce_v6_0_ext_commit,
	.disable = dce_v6_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};

static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v6_0_encoder_prepare,
	.mode_set = dce_v6_0_encoder_mode_set,
	.commit = dce_v6_0_encoder_commit,
	.disable = dce_v6_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v6_0_encoder_prepare,
	.mode_set = dce_v6_0_encoder_mode_set,
	.commit = dce_v6_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};

static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
	.destroy = dce_v6_0_encoder_destroy,
};

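/*
 * Encoders are instantiated from the ATOM BIOS object table: the
 * object id selects the DRM encoder type and helper vtable, and
 * external bridge chips (SI170B, TRAVIS, NUTMEG, ...) get pass-through
 * helpers since the primary encoders drive them.
 */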
static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
				 uint32_t encoder_enum,
				 uint32_t supported_device,
				 u16 caps)
{
	struct drm_device *dev = adev->ddev;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}
	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
	switch (adev->mode_info.num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	amdgpu_encoder->enc_priv = NULL;
	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
				 DRM_MODE_ENCODER_DAC, NULL);
		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			amdgpu_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		} else {
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		}
		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
		else
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
		break;
	}
}

static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
	.set_vga_render_state = &dce_v6_0_set_vga_render_state,
	.bandwidth_update = &dce_v6_0_bandwidth_update,
	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
	.vblank_wait = &dce_v6_0_vblank_wait,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v6_0_hpd_sense,
	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
	.page_flip = &dce_v6_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v6_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
	.stop_mc_access = &dce_v6_0_stop_mc_access,
	.resume_mc_access = &dce_v6_0_resume_mc_access,
};

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dce_v6_0_display_funcs;
}

static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
	.set = dce_v6_0_set_crtc_interrupt_state,
	.process = dce_v6_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
	.set = dce_v6_0_set_pageflip_interrupt_state,
	.process = dce_v6_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
	.set = dce_v6_0_set_hpd_interrupt_state,
	.process = dce_v6_0_hpd_irq,
};

static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
}

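/*
 * DCE 6.0 (Tahiti/Pitcairn/Verde) and DCE 6.4 (Oland) share this
 * implementation; the two ip block versions differ only in the minor
 * version they report.
 */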
const struct amdgpu_ip_block_version dce_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v6_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 6,
	.minor = 4,
	.rev = 0,
	.funcs = &dce_v6_0_ip_funcs,
};