/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;

} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static const uint32_t hpd_int_control_offsets[6] = {
	mmDC_HPD1_INT_CONTROL,
	mmDC_HPD2_INT_CONTROL,
	mmDC_HPD3_INT_CONTROL,
	mmDC_HPD4_INT_CONTROL,
	mmDC_HPD5_INT_CONTROL,
	mmDC_HPD6_INT_CONTROL,
};

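/*
 * The audio endpoint registers are reached indirectly through an
 * index/data register pair (AZALIA_F0_CODEC_ENDPOINT_INDEX/_DATA);
 * the audio_endpt_idx_lock serializes the two-step access so that
 * concurrent readers and writers cannot interleave.
 */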
static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
	    CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
		return true;
	else
		return false;
}

static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	if (pos1 != pos2)
		return true;
	else
		return false;
}

/**
 * dce_v8_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	unsigned i = 0;

	if (crtc >= adev->mode_info.num_crtc)
		return;

	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce_v8_0_is_in_vblank(adev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce_v8_0_is_counter_moving(adev, crtc))
				break;
		}
	}

	while (!dce_v8_0_is_in_vblank(adev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce_v8_0_is_counter_moving(adev, crtc))
				break;
		}
	}
}

static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

/**
 * dce_v8_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to do the pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (evergreen+).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high; when it does, we release the lock and allow the
 * double buffered update to take place.
 */
static void dce_v8_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
	WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);

	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) &
		    GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
	WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
}

static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v8_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case AMDGPU_HPD_1:
		if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_2:
		if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_3:
		if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_4:
		if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_5:
		if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK)
			connected = true;
		break;
	case AMDGPU_HPD_6:
		if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}

/**
 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v8_0_hpd_sense(adev, hpd);

	switch (hpd) {
	case AMDGPU_HPD_1:
		tmp = RREG32(mmDC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
		WREG32(mmDC_HPD1_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_2:
		tmp = RREG32(mmDC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK;
		WREG32(mmDC_HPD2_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_3:
		tmp = RREG32(mmDC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK;
		WREG32(mmDC_HPD3_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_4:
		tmp = RREG32(mmDC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK;
		WREG32(mmDC_HPD4_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_5:
		tmp = RREG32(mmDC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK;
		WREG32(mmDC_HPD5_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_6:
		tmp = RREG32(mmDC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
		else
			tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK;
		WREG32(mmDC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

/**
 * dce_v8_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) |
		(0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) |
		DC_HPD1_CONTROL__DC_HPD1_EN_MASK;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* Don't try to enable hpd on eDP or LVDS.  This avoids
			 * breaking the aux dp channel on iMacs and helps (but
			 * does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * It also avoids interrupt storms during dpms.
			 */
			continue;
		}
		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(mmDC_HPD1_CONTROL, tmp);
			break;
		case AMDGPU_HPD_2:
			WREG32(mmDC_HPD2_CONTROL, tmp);
			break;
		case AMDGPU_HPD_3:
			WREG32(mmDC_HPD3_CONTROL, tmp);
			break;
		case AMDGPU_HPD_4:
			WREG32(mmDC_HPD4_CONTROL, tmp);
			break;
		case AMDGPU_HPD_5:
			WREG32(mmDC_HPD5_CONTROL, tmp);
			break;
		case AMDGPU_HPD_6:
			WREG32(mmDC_HPD6_CONTROL, tmp);
			break;
		default:
			break;
		}
		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

/**
 * dce_v8_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(mmDC_HPD1_CONTROL, 0);
			break;
		case AMDGPU_HPD_2:
			WREG32(mmDC_HPD2_CONTROL, 0);
			break;
		case AMDGPU_HPD_3:
			WREG32(mmDC_HPD3_CONTROL, 0);
			break;
		case AMDGPU_HPD_4:
			WREG32(mmDC_HPD4_CONTROL, 0);
			break;
		case AMDGPU_HPD_5:
			WREG32(mmDC_HPD5_CONTROL, 0);
			break;
		case AMDGPU_HPD_6:
			WREG32(mmDC_HPD6_CONTROL, 0);
			break;
		default:
			break;
		}
		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
				    struct amdgpu_mode_mc_save *save)
{
	u32 crtc_enabled, tmp;
	int i;

	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);

	/* blank the display controllers */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
					     CRTC_CONTROL, CRTC_MASTER_EN);
		if (crtc_enabled) {
#if 0
			u32 frame_count;
			int j;

			save->crtc_enabled[i] = true;
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
				amdgpu_display_vblank_wait(adev, i);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = amdgpu_display_vblank_get_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}
			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
#else
			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
			/* ***** */
#endif
		} else {
			save->crtc_enabled[i] = false;
		}
	}
}

static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
				      struct amdgpu_mode_mc_save *save)
{
	u32 tmp, frame_count;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);

		if (save->crtc_enabled[i]) {
			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
			for (j = 0; j < adev->usec_timeout; j++) {
				tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
				if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
					break;
				udelay(1);
			}
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			/* wait for the next frame */
			frame_count = amdgpu_display_vblank_get_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}
		}
	}

	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));

	/* Unlock vga access */
	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}

static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	u32 tmp;

	/* Lockout access through VGA aperture */
	tmp = RREG32(mmVGA_HDP_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
	else
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
	WREG32(mmVGA_HDP_CONTROL, tmp);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
	else
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);
}

static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}


/* display watermark setup */
/**
 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Set up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controller.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width.  For display widths larger
	 * than 4096, you need to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
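	/*
	 * For example, a 1920x1080 mode (crtc_hdisplay == 1920) misses the
	 * first (< 1920) bucket and lands in the second below, so two
	 * partitions are enabled and 2560 * 2 pixels of line buffer are
	 * reported to the watermark code.
	 */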
	if (amdgpu_crtc->base.enabled && mode) {
		if (mode->crtc_hdisplay < 1920) {
			tmp = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			tmp = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		}
	} else {
		tmp = 1;
		buffer_alloc = 0;
	}

	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
	       (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
	       (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce8_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};
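
/*
 * The watermark helpers below do their arithmetic in fixed20_12 (20.12
 * fixed point, see drm_fixed.h): dfixed_const() converts an integer,
 * dfixed_mul()/dfixed_div() keep the 12 fractional bits, and
 * dfixed_trunc() converts back to an integer.
 */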

/**
 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
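
/*
 * Worked example with made-up numbers: for wm->yclk = 800000 (800 MHz
 * effective per pin) and wm->dram_channels = 2, the math above gives
 * (800000 / 1000) * (2 * 4) * 0.7 = 4480 MBytes/s of raw DRAM bandwidth.
 */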

/**
 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
	u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v8_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
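
/*
 * In short, the watermark above is the worst-case latency seen by one
 * head (MC latency, plus the other heads' chunk and cursor return time,
 * plus display pipe latency), padded with the shortfall whenever the
 * line buffer cannot be refilled within one active display period.
 */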

/**
 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v8_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

/**
 * dce_v8_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce8_wm_params wm_low, wm_high;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v8_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v8_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
	}

	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = wm_mask;
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
}

/**
 * dce_v8_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	amdgpu_update_display_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		mode = &adev->mode_info.crtcs[i]->base.mode;
		lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
		dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
					    lb_size, num_heads);
	}
}

static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 offset, tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp &
		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v8_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->offset;

	WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
	       (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
}

static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp = 0, offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		if (connector->latency_present[1])
			tmp =
			(connector->video_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
	} else {
		if (connector->latency_present[0])
			tmp =
			(connector->video_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
	}
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 offset, tmp;
	u8 *sadb = NULL;
	int sad_count;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
		 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
	/* set HDMI mode */
	tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
	if (sad_count)
		tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
	else
		tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};
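
	/*
	 * Each entry above pairs an AZALIA audio descriptor register with
	 * the CEA-861 audio coding type it reports; the loop below fills
	 * each register from the best matching Short Audio Descriptor.
	 */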

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 value = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					value = (sad->channels <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
						(sad->byte2 <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
						(sad->freq <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		value |= (stereo_freqs <<
			  AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);

		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
	}

	kfree(sads);
}

static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};
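
/*
 * The pin_offsets above are register offsets from the first audio
 * endpoint block at 0x1780; they are the block_offset values consumed
 * by the RREG32_AUDIO_ENDPT/WREG32_AUDIO_ENDPT accessors.
 */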

static int dce_v8_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else if ((adev->asic_type == CHIP_KABINI) ||
		 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
		adev->mode_info.audio.num_pins = 3;
	else if ((adev->asic_type == CHIP_BONAIRE) ||
		 (adev->asic_type == CHIP_HAWAII)) /* BN/HW: 6 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else
		adev->mode_info.audio.num_pins = 3;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio.  it will be set up later */
		/* XXX remove once we switch to ip funcs */
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}

/*
 * update the N and CTS parameters for a given pixel clock rate
 */
static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;

	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);

	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
	WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);

	WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
	WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
}
1675
1676/*
1677 * build a HDMI Video Info Frame
1678 */
1679static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1680 void *buffer, size_t size)
1681{
1682 struct drm_device *dev = encoder->dev;
1683 struct amdgpu_device *adev = dev->dev_private;
1684 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1685 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1686 uint32_t offset = dig->afmt->offset;
1687 uint8_t *frame = buffer + 3;
1688 uint8_t *header = buffer;
1689
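	/* the packed infoframe is scattered across four 32-bit AFMT_AVI_INFO
	 * registers in little-endian byte order; the last register also
	 * carries the header's version byte in bits 31:24
	 */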
1690 WREG32(mmAFMT_AVI_INFO0 + offset,
1691 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1692 WREG32(mmAFMT_AVI_INFO1 + offset,
1693 frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1694 WREG32(mmAFMT_AVI_INFO2 + offset,
1695 frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1696 WREG32(mmAFMT_AVI_INFO3 + offset,
1697 frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1698}
1699
1700static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1701{
1702 struct drm_device *dev = encoder->dev;
1703 struct amdgpu_device *adev = dev->dev_private;
1704 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1705 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1706 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1707 u32 dto_phase = 24 * 1000;
1708 u32 dto_modulo = clock;
1709
1710 if (!dig || !dig->afmt)
1711 return;
1712
1713 /* XXX two dtos; generally use dto0 for hdmi */
1714	/* Express [24MHz / target pixel clock] as an exact rational
1715	 * number (quotient of two integers): DCCG_AUDIO_DTOx_PHASE
1716	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
1717	 */
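	/* e.g. for a 148.5 MHz pixel clock, clock == 148500 (kHz), so the
	 * ratio programmed below is 24000 / 148500 (illustrative numbers)
	 */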
1718 WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
1719 WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1720 WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1721}
1722
1723/*
1724 * update the info frames with the data from the current display mode
1725 */
1726static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
1727 struct drm_display_mode *mode)
1728{
1729 struct drm_device *dev = encoder->dev;
1730 struct amdgpu_device *adev = dev->dev_private;
1731 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1732 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1733 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1734 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1735 struct hdmi_avi_infoframe frame;
1736 uint32_t offset, val;
1737 ssize_t err;
1738 int bpc = 8;
1739
1740 if (!dig || !dig->afmt)
1741 return;
1742
1743 /* Silent, r600_hdmi_enable will raise WARN for us */
1744 if (!dig->afmt->enabled)
1745 return;
1746 offset = dig->afmt->offset;
1747
1748 /* hdmi deep color mode general control packets setup, if bpc > 8 */
1749 if (encoder->crtc) {
1750 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1751 bpc = amdgpu_crtc->bpc;
1752 }
1753
1754 /* disable audio prior to setting up hw */
1755 dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
1756 dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1757
1758 dce_v8_0_audio_set_dto(encoder, mode->clock);
1759
1760 WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1761 HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */
1762
1763 WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
1764
1765 val = RREG32(mmHDMI_CONTROL + offset);
1766 val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1767 val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;
1768
1769 switch (bpc) {
1770 case 0:
1771 case 6:
1772 case 8:
1773 case 16:
1774 default:
1775 DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1776 connector->name, bpc);
1777 break;
1778 case 10:
1779 val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1780 val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1781 DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1782 connector->name);
1783 break;
1784 case 12:
1785 val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1786 val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1787 DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1788 connector->name);
1789 break;
1790 }
1791
1792 WREG32(mmHDMI_CONTROL + offset, val);
1793
1794 WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1795 HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
1796 HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
1797 HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */
1798
1799 WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
1800 HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
1801 HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */
1802
1803 WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
1804 AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */
1805
1806 WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
1807 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */
1808
1809 WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
1810
1811 WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
1812 (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
1813		(3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */
1814
1815 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1816 AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */
1817
1818 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
1819
1820 if (bpc > 8)
1821 WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1822		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1823 else
1824 WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1825 HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
1826		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1827
1828 dce_v8_0_afmt_update_ACR(encoder, mode->clock);
1829
1830 WREG32(mmAFMT_60958_0 + offset,
1831 (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));
1832
1833 WREG32(mmAFMT_60958_1 + offset,
1834 (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));
1835
1836 WREG32(mmAFMT_60958_2 + offset,
1837 (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
1838 (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
1839 (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
1840 (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
1841 (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
1842 (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));
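	/* the AFMT_60958 writes above fill the IEC 60958 channel-status
	 * channel number fields 1..8 (L, R, then channels 2..7) so the sink
	 * can identify each subframe's channel */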
1843
1844 dce_v8_0_audio_write_speaker_allocation(encoder);
1845
1846
1847 WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
1848 (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1849
1850 dce_v8_0_afmt_audio_select_pin(encoder);
1851 dce_v8_0_audio_write_sad_regs(encoder);
1852 dce_v8_0_audio_write_latency_fields(encoder, mode);
1853
1854 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
1855 if (err < 0) {
1856 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1857 return;
1858 }
1859
1860 err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1861 if (err < 0) {
1862 DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1863 return;
1864 }
1865
1866 dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1867
1868	WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
1869		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
1870		  HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for AVI info values to be updated */
1871
1872 WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
1873 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
1874 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);
1875
1876 WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1877 AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
1878
1879	/* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
1880 WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
1881 WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
1882 WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
1883 WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
1884
1885	/* enable audio after setting up hw */
1886 dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
1887}
1888
1889static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1890{
1891 struct drm_device *dev = encoder->dev;
1892 struct amdgpu_device *adev = dev->dev_private;
1893 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1894 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1895
1896 if (!dig || !dig->afmt)
1897 return;
1898
1899 /* Silent, r600_hdmi_enable will raise WARN for us */
1900 if (enable && dig->afmt->enabled)
1901 return;
1902 if (!enable && !dig->afmt->enabled)
1903 return;
1904
1905 if (!enable && dig->afmt->pin) {
1906 dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1907 dig->afmt->pin = NULL;
1908 }
1909
1910 dig->afmt->enabled = enable;
1911
1912 DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1913 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1914}
1915
1916static void dce_v8_0_afmt_init(struct amdgpu_device *adev)
1917{
1918 int i;
1919
1920 for (i = 0; i < adev->mode_info.num_dig; i++)
1921 adev->mode_info.afmt[i] = NULL;
1922
1923 /* DCE8 has audio blocks tied to DIG encoders */
1924 for (i = 0; i < adev->mode_info.num_dig; i++) {
1925 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1926 if (adev->mode_info.afmt[i]) {
1927 adev->mode_info.afmt[i]->offset = dig_offsets[i];
1928 adev->mode_info.afmt[i]->id = i;
1929 }
1930 }
1931}
1932
1933static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
1934{
1935 int i;
1936
1937 for (i = 0; i < adev->mode_info.num_dig; i++) {
1938 kfree(adev->mode_info.afmt[i]);
1939 adev->mode_info.afmt[i] = NULL;
1940 }
1941}
1942
1943static const u32 vga_control_regs[6] =
1944{
1945 mmD1VGA_CONTROL,
1946 mmD2VGA_CONTROL,
1947 mmD3VGA_CONTROL,
1948 mmD4VGA_CONTROL,
1949 mmD5VGA_CONTROL,
1950 mmD6VGA_CONTROL,
1951};
1952
1953static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
1954{
1955 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1956 struct drm_device *dev = crtc->dev;
1957 struct amdgpu_device *adev = dev->dev_private;
1958 u32 vga_control;
1959
1960 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1961 if (enable)
1962 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1963 else
1964 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1965}
1966
1967static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
1968{
1969 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1970 struct drm_device *dev = crtc->dev;
1971 struct amdgpu_device *adev = dev->dev_private;
1972
1973 if (enable)
1974 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1975 else
1976 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1977}
1978
1979static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1980 struct drm_framebuffer *fb,
1981 int x, int y, int atomic)
1982{
1983 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1984 struct drm_device *dev = crtc->dev;
1985 struct amdgpu_device *adev = dev->dev_private;
1986 struct amdgpu_framebuffer *amdgpu_fb;
1987 struct drm_framebuffer *target_fb;
1988 struct drm_gem_object *obj;
1989 struct amdgpu_bo *rbo;
1990 uint64_t fb_location, tiling_flags;
1991 uint32_t fb_format, fb_pitch_pixels;
1992	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1993	u32 pipe_config;
1994 u32 tmp, viewport_w, viewport_h;
1995 int r;
1996 bool bypass_lut = false;
1997
1998 /* no fb bound */
1999 if (!atomic && !crtc->primary->fb) {
2000 DRM_DEBUG_KMS("No FB bound\n");
2001 return 0;
2002 }
2003
2004 if (atomic) {
2005 amdgpu_fb = to_amdgpu_framebuffer(fb);
2006 target_fb = fb;
2007 }
2008 else {
2009 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2010 target_fb = crtc->primary->fb;
2011 }
2012
2013 /* If atomic, assume fb object is pinned & idle & fenced and
2014 * just update base pointers
2015 */
2016 obj = amdgpu_fb->obj;
2017 rbo = gem_to_amdgpu_bo(obj);
2018 r = amdgpu_bo_reserve(rbo, false);
2019 if (unlikely(r != 0))
2020 return r;
2021
2022 if (atomic)
2023 fb_location = amdgpu_bo_gpu_offset(rbo);
2024 else {
2025 r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
2026 if (unlikely(r != 0)) {
2027 amdgpu_bo_unreserve(rbo);
2028 return -EINVAL;
2029 }
2030 }
2031
2032 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
2033 amdgpu_bo_unreserve(rbo);
2034
2035 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2036
2037 switch (target_fb->pixel_format) {
2038 case DRM_FORMAT_C8:
2039 fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2040 (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2041 break;
2042 case DRM_FORMAT_XRGB4444:
2043 case DRM_FORMAT_ARGB4444:
2044 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2045 (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2046#ifdef __BIG_ENDIAN
2047 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2048#endif
2049 break;
2050 case DRM_FORMAT_XRGB1555:
2051 case DRM_FORMAT_ARGB1555:
2052 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2053 (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2054#ifdef __BIG_ENDIAN
2055 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2056#endif
2057 break;
2058 case DRM_FORMAT_BGRX5551:
2059 case DRM_FORMAT_BGRA5551:
2060 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2061 (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2062#ifdef __BIG_ENDIAN
2063 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2064#endif
2065 break;
2066 case DRM_FORMAT_RGB565:
2067 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2068 (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2069#ifdef __BIG_ENDIAN
2070 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2071#endif
2072 break;
2073 case DRM_FORMAT_XRGB8888:
2074 case DRM_FORMAT_ARGB8888:
2075 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2076 (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2077#ifdef __BIG_ENDIAN
2078 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2079#endif
2080 break;
2081 case DRM_FORMAT_XRGB2101010:
2082 case DRM_FORMAT_ARGB2101010:
2083 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2084 (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2085#ifdef __BIG_ENDIAN
2086 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2087#endif
2088		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
2089 bypass_lut = true;
2090 break;
2091 case DRM_FORMAT_BGRX1010102:
2092 case DRM_FORMAT_BGRA1010102:
2093 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
2094 (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
2095#ifdef __BIG_ENDIAN
2096 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
2097#endif
2098		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
2099 bypass_lut = true;
2100 break;
2101 default:
2102 DRM_ERROR("Unsupported screen format %s\n",
2103 drm_get_format_name(target_fb->pixel_format));
2104 return -EINVAL;
2105 }
2106
2107	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
2108		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
2109
2110 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2111 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2112 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2113 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2114 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2115
2116 fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
2117 fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2118 fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
2119 fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
2120 fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
2121 fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
2122 fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
2123	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
2124 fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2125 }
2126
2127 fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
2128
2129 dce_v8_0_vga_enable(crtc, false);
2130
2131 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2132 upper_32_bits(fb_location));
2133 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2134 upper_32_bits(fb_location));
2135 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2136 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2137 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2138 (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2139 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2140 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2141
2142 /*
2143 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
2144 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
2145 * retain the full precision throughout the pipeline.
2146 */
2147 WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
2148 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
2149 ~LUT_10BIT_BYPASS_EN);
2150
2151 if (bypass_lut)
2152 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2153
2154 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2155 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2156 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2157 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2158 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2159 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2160
2161 fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
2162 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2163
2164 dce_v8_0_grph_enable(crtc, true);
2165
2166 WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2167 target_fb->height);
2168
2169 x &= ~3;
2170 y &= ~1;
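	/* the viewport origin is aligned down before being programmed:
	 * x to a multiple of 4 pixels, y to a multiple of 2 lines */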
2171 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2172 (x << 16) | y);
2173 viewport_w = crtc->mode.hdisplay;
2174 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2175 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2176 (viewport_w << 16) | viewport_h);
2177
2178 /* pageflip setup */
2179 /* make sure flip is at vb rather than hb */
2180 tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
2181 tmp &= ~GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK;
2182 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2183
2184 /* set pageflip to happen only at start of vblank interval (front porch) */
2185 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
2186
2187 if (!atomic && fb && fb != crtc->primary->fb) {
2188 amdgpu_fb = to_amdgpu_framebuffer(fb);
2189 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2190 r = amdgpu_bo_reserve(rbo, false);
2191 if (unlikely(r != 0))
2192 return r;
2193 amdgpu_bo_unpin(rbo);
2194 amdgpu_bo_unreserve(rbo);
2195 }
2196
2197 /* Bytes per pixel may have changed */
2198 dce_v8_0_bandwidth_update(adev);
2199
2200 return 0;
2201}
2202
2203static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
2204 struct drm_display_mode *mode)
2205{
2206 struct drm_device *dev = crtc->dev;
2207 struct amdgpu_device *adev = dev->dev_private;
2208 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2209
2210 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2211 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
2212		       LB_DATA_FORMAT__INTERLEAVE_EN_MASK);
2213 else
2214 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2215}
2216
2217static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
2218{
2219 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2220 struct drm_device *dev = crtc->dev;
2221 struct amdgpu_device *adev = dev->dev_private;
2222 int i;
2223
2224 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2225
2226 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2227 ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2228 (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2229 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2230 PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2231 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2232 PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2233 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2234 ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2235 (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2236
2237 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2238
2239 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2240 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2241 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2242
2243 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2244 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2245 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2246
2247 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2248 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2249
2250 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
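	/* each DC_LUT_30_COLOR write below latches one entry and advances
	 * the RW index; components are packed R[29:20] G[19:10] B[9:0] */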
2251 for (i = 0; i < 256; i++) {
2252 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2253 (amdgpu_crtc->lut_r[i] << 20) |
2254 (amdgpu_crtc->lut_g[i] << 10) |
2255 (amdgpu_crtc->lut_b[i] << 0));
2256 }
2257
2258 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2259 ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2260 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2261 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2262 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2263 ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2264 (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2265 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2266 ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2267 (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2268 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2269 ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2270 (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2271 /* XXX match this to the depth of the crtc fmt block, move to modeset? */
2272 WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2273 /* XXX this only needs to be programmed once per crtc at startup,
2274 * not sure where the best place for it is
2275 */
2276 WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
2277 ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
2278}
2279
2280static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
2281{
2282 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2283 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2284
2285 switch (amdgpu_encoder->encoder_id) {
2286 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2287 if (dig->linkb)
2288 return 1;
2289 else
2290 return 0;
2291 break;
2292 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2293 if (dig->linkb)
2294 return 3;
2295 else
2296 return 2;
2297 break;
2298 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2299 if (dig->linkb)
2300 return 5;
2301 else
2302 return 4;
2303 break;
2304 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2305 return 6;
2306 break;
2307 default:
2308 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2309 return 0;
2310 }
2311}
2312
2313/**
2314 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
2315 *
2316 * @crtc: drm crtc
2317 *
2318 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
2319 * a single PPLL can be used for all DP crtcs/encoders. For non-DP
2320 * monitors a dedicated PPLL must be used. If a particular board has
2321 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2322 * as there is no need to program the PLL itself. If we are not able to
2323 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2324 * avoid messing up an existing monitor.
2325 *
2326 * Asic specific PLL information
2327 *
2328 * DCE 8.x
2329 * KB/KV
2330 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2331 * CI
2332 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2333 *
2334 */
2335static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
2336{
2337 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2338 struct drm_device *dev = crtc->dev;
2339 struct amdgpu_device *adev = dev->dev_private;
2340 u32 pll_in_use;
2341 int pll;
2342
2343 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2344 if (adev->clock.dp_extclk)
2345 /* skip PPLL programming if using ext clock */
2346 return ATOM_PPLL_INVALID;
2347 else {
2348 /* use the same PPLL for all DP monitors */
2349 pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2350 if (pll != ATOM_PPLL_INVALID)
2351 return pll;
2352 }
2353 } else {
2354 /* use the same PPLL for all monitors with the same clock */
2355 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2356 if (pll != ATOM_PPLL_INVALID)
2357 return pll;
2358 }
2359 /* otherwise, pick one of the plls */
2360 if ((adev->asic_type == CHIP_KABINI) ||
2361 (adev->asic_type == CHIP_MULLINS)) {
2362 /* KB/ML has PPLL1 and PPLL2 */
2363 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2364 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2365 return ATOM_PPLL2;
2366 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2367 return ATOM_PPLL1;
2368 DRM_ERROR("unable to allocate a PPLL\n");
2369 return ATOM_PPLL_INVALID;
2370 } else {
2371 /* CI/KV has PPLL0, PPLL1, and PPLL2 */
2372 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2373 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2374 return ATOM_PPLL2;
2375 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2376 return ATOM_PPLL1;
2377 if (!(pll_in_use & (1 << ATOM_PPLL0)))
2378 return ATOM_PPLL0;
2379 DRM_ERROR("unable to allocate a PPLL\n");
2380 return ATOM_PPLL_INVALID;
2381 }
2382 return ATOM_PPLL_INVALID;
2383}
2384
2385static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2386{
2387 struct amdgpu_device *adev = crtc->dev->dev_private;
2388 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2389 uint32_t cur_lock;
2390
2391 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2392 if (lock)
2393 cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2394 else
2395 cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2396 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2397}
2398
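/* CURSOR_24_8_PRE_MULT selects a 32bpp ARGB cursor with pre-multiplied
 * alpha; hide vs. show below differ only in the CURSOR_EN bit
 */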
2399static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
2400{
2401 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2402 struct amdgpu_device *adev = crtc->dev->dev_private;
2403
2404 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2405 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2406 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2407}
2408
2409static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
2410{
2411 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2412 struct amdgpu_device *adev = crtc->dev->dev_private;
2413
2414 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2415 CUR_CONTROL__CURSOR_EN_MASK |
2416 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2417 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2418}
2419
2420static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2421 int x, int y)
2422{
2423 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2424 struct amdgpu_device *adev = crtc->dev->dev_private;
2425 int xorigin = 0, yorigin = 0;
2426
2427	/* avivo cursors are offset into the total surface */
2428 x += crtc->x;
2429 y += crtc->y;
2430 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2431
2432 if (x < 0) {
2433 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2434 x = 0;
2435 }
2436 if (y < 0) {
2437 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2438 y = 0;
2439 }
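	/* a cursor hanging off the top/left edge is clamped to 0,0 and the
	 * hotspot origin is advanced instead, keeping the visible part
	 * aligned on screen */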
2440
2441 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2442 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2443 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2444 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2445
2446 amdgpu_crtc->cursor_x = x;
2447 amdgpu_crtc->cursor_y = y;
2448
2449 return 0;
2450}
2451
2452	static int dce_v8_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
2453{
2454 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2455 struct amdgpu_device *adev = crtc->dev->dev_private;
2456 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(obj);
2457 uint64_t gpu_addr;
2458 int ret;
2459
2460 ret = amdgpu_bo_reserve(aobj, false);
2461 if (unlikely(ret != 0))
2462 goto fail;
2463
2464 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
2465 amdgpu_bo_unreserve(aobj);
2466 if (ret)
2467 goto fail;
2468
2469 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2470 upper_32_bits(gpu_addr));
2471 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2472 lower_32_bits(gpu_addr));
2473
2474 return 0;
2475
2476fail:
2477 drm_gem_object_unreference_unlocked(obj);
2478
2479 return ret;
2480}
2481
2482static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2483 int x, int y)
2484{
2485 int ret;
2486
2487 dce_v8_0_lock_cursor(crtc, true);
2488 ret = dce_v8_0_cursor_move_locked(crtc, x, y);
2489 dce_v8_0_lock_cursor(crtc, false);
2490
2491 return ret;
2492}
2493
2494static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2495 struct drm_file *file_priv,
2496 uint32_t handle,
2497 uint32_t width,
2498 uint32_t height,
2499 int32_t hot_x,
2500 int32_t hot_y)
2501{
2502 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2503 struct drm_gem_object *obj;
2504 int ret;
2505
2506 if (!handle) {
2507 /* turn off cursor */
2508 dce_v8_0_hide_cursor(crtc);
2509 obj = NULL;
2510 goto unpin;
2511 }
2512
2513 if ((width > amdgpu_crtc->max_cursor_width) ||
2514 (height > amdgpu_crtc->max_cursor_height)) {
2515 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2516 return -EINVAL;
2517 }
2518
2519 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
2520 if (!obj) {
2521 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2522 return -ENOENT;
2523 }
2524
2525 amdgpu_crtc->cursor_width = width;
2526 amdgpu_crtc->cursor_height = height;
2527
2528 dce_v8_0_lock_cursor(crtc, true);
2529
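	/* hotspot handling lives here rather than in dce_v8_0_set_cursor():
	 * when the hotspot changes, shift the cursor position so the
	 * on-screen hotspot location stays put */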
2530 if (hot_x != amdgpu_crtc->cursor_hot_x ||
2531 hot_y != amdgpu_crtc->cursor_hot_y) {
2532 int x, y;
2533
2534 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2535 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2536
2537 dce_v8_0_cursor_move_locked(crtc, x, y);
2538
2539 amdgpu_crtc->cursor_hot_x = hot_x;
2540 amdgpu_crtc->cursor_hot_y = hot_y;
2541 }
2542
2543 ret = dce_v8_0_set_cursor(crtc, obj);
2544 if (ret)
2545 DRM_ERROR("dce_v8_0_set_cursor returned %d, not changing cursor\n",
2546 ret);
2547 else
2548 dce_v8_0_show_cursor(crtc);
2549 dce_v8_0_lock_cursor(crtc, false);
2550
2551unpin:
2552 if (amdgpu_crtc->cursor_bo) {
2553 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2554 ret = amdgpu_bo_reserve(aobj, false);
2555		if (likely(ret == 0)) {
2556 amdgpu_bo_unpin(aobj);
2557 amdgpu_bo_unreserve(aobj);
2558		}
2559 if (amdgpu_crtc->cursor_bo != obj)
2560 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
2561 }
2562
2563 amdgpu_crtc->cursor_bo = obj;
2564 return 0;
2565}
2566
2567static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2568{
2569 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2570 int ret;
2571
2572 if (amdgpu_crtc->cursor_bo) {
2573 dce_v8_0_lock_cursor(crtc, true);
2574
2575 dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2576 amdgpu_crtc->cursor_y);
2577
2578		ret = dce_v8_0_set_cursor(crtc, amdgpu_crtc->cursor_bo);
2579 if (ret)
2580 DRM_ERROR("dce_v8_0_set_cursor returned %d, not showing "
2581 "cursor\n", ret);
2582 else
2583 dce_v8_0_show_cursor(crtc);
2584
2585 dce_v8_0_lock_cursor(crtc, false);
2586 }
2587}
2588
2589static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2590 u16 *blue, uint32_t start, uint32_t size)
2591{
2592 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2593 int end = (start + size > 256) ? 256 : start + size, i;
2594
2595 /* userspace palettes are always correct as is */
2596 for (i = start; i < end; i++) {
2597 amdgpu_crtc->lut_r[i] = red[i] >> 6;
2598 amdgpu_crtc->lut_g[i] = green[i] >> 6;
2599 amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2600 }
2601 dce_v8_0_crtc_load_lut(crtc);
2602}
2603
2604static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
2605{
2606 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2607
2608 drm_crtc_cleanup(crtc);
2609 destroy_workqueue(amdgpu_crtc->pflip_queue);
2610 kfree(amdgpu_crtc);
2611}
2612
2613static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
2614	.cursor_set2 = dce_v8_0_crtc_cursor_set2,
2615 .cursor_move = dce_v8_0_crtc_cursor_move,
2616 .gamma_set = dce_v8_0_crtc_gamma_set,
2617 .set_config = amdgpu_crtc_set_config,
2618 .destroy = dce_v8_0_crtc_destroy,
2619 .page_flip = amdgpu_crtc_page_flip,
2620};
2621
2622static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2623{
2624 struct drm_device *dev = crtc->dev;
2625 struct amdgpu_device *adev = dev->dev_private;
2626 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2627	unsigned type;
2628
2629 switch (mode) {
2630 case DRM_MODE_DPMS_ON:
2631 amdgpu_crtc->enabled = true;
2632 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2633 dce_v8_0_vga_enable(crtc, true);
2634 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2635 dce_v8_0_vga_enable(crtc, false);
2636 /* Make sure VBLANK interrupt is still enabled */
2637 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2638 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2639 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2640 dce_v8_0_crtc_load_lut(crtc);
2641 break;
2642 case DRM_MODE_DPMS_STANDBY:
2643 case DRM_MODE_DPMS_SUSPEND:
2644 case DRM_MODE_DPMS_OFF:
2645 drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
2646 if (amdgpu_crtc->enabled) {
2647 dce_v8_0_vga_enable(crtc, true);
2648 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2649 dce_v8_0_vga_enable(crtc, false);
2650 }
2651 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2652 amdgpu_crtc->enabled = false;
2653 break;
2654 }
2655 /* adjust pm to dpms */
2656 amdgpu_pm_compute_clocks(adev);
2657}
2658
2659static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
2660{
2661 /* disable crtc pair power gating before programming */
2662 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2663 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2664 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2665}
2666
2667static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
2668{
2669 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2670 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2671}
2672
2673static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
2674{
2675 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2676 struct drm_device *dev = crtc->dev;
2677 struct amdgpu_device *adev = dev->dev_private;
2678 struct amdgpu_atom_ss ss;
2679 int i;
2680
2681 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2682 if (crtc->primary->fb) {
2683 int r;
2684 struct amdgpu_framebuffer *amdgpu_fb;
2685 struct amdgpu_bo *rbo;
2686
2687 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2688 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2689 r = amdgpu_bo_reserve(rbo, false);
2690 if (unlikely(r))
2691 DRM_ERROR("failed to reserve rbo before unpin\n");
2692 else {
2693 amdgpu_bo_unpin(rbo);
2694 amdgpu_bo_unreserve(rbo);
2695 }
2696 }
2697 /* disable the GRPH */
2698 dce_v8_0_grph_enable(crtc, false);
2699
2700 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2701
2702 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2703 if (adev->mode_info.crtcs[i] &&
2704 adev->mode_info.crtcs[i]->enabled &&
2705 i != amdgpu_crtc->crtc_id &&
2706 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2707			/* another crtc is using this pll; don't turn
2708			 * off the pll
2709			 */
2710 goto done;
2711 }
2712 }
2713
2714 switch (amdgpu_crtc->pll_id) {
2715 case ATOM_PPLL1:
2716 case ATOM_PPLL2:
2717 /* disable the ppll */
2718 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2719 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2720 break;
2721 case ATOM_PPLL0:
2722 /* disable the ppll */
2723 if ((adev->asic_type == CHIP_KAVERI) ||
2724 (adev->asic_type == CHIP_BONAIRE) ||
2725 (adev->asic_type == CHIP_HAWAII))
2726 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2727 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2728 break;
2729 default:
2730 break;
2731 }
2732done:
2733 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2734 amdgpu_crtc->adjusted_clock = 0;
2735 amdgpu_crtc->encoder = NULL;
2736 amdgpu_crtc->connector = NULL;
2737}
2738
2739static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
2740 struct drm_display_mode *mode,
2741 struct drm_display_mode *adjusted_mode,
2742 int x, int y, struct drm_framebuffer *old_fb)
2743{
2744 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2745
2746 if (!amdgpu_crtc->adjusted_clock)
2747 return -EINVAL;
2748
2749 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2750 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2751 dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2752 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2753 amdgpu_atombios_crtc_scaler_setup(crtc);
2754	dce_v8_0_cursor_reset(crtc);
2755	/* update the hw version for dpm */
2756 amdgpu_crtc->hw_mode = *adjusted_mode;
2757
2758 return 0;
2759}
2760
2761static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
2762 const struct drm_display_mode *mode,
2763 struct drm_display_mode *adjusted_mode)
2764{
2765 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2766 struct drm_device *dev = crtc->dev;
2767 struct drm_encoder *encoder;
2768
2769 /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2770 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2771 if (encoder->crtc == crtc) {
2772 amdgpu_crtc->encoder = encoder;
2773 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2774 break;
2775 }
2776 }
2777 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2778 amdgpu_crtc->encoder = NULL;
2779 amdgpu_crtc->connector = NULL;
2780 return false;
2781 }
2782 if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2783 return false;
2784 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2785 return false;
2786 /* pick pll */
2787 amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
2788 /* if we can't get a PPLL for a non-DP encoder, fail */
2789 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2790 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2791 return false;
2792
2793 return true;
2794}
2795
2796static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2797 struct drm_framebuffer *old_fb)
2798{
2799 return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2800}
2801
2802static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2803 struct drm_framebuffer *fb,
2804 int x, int y, enum mode_set_atomic state)
2805{
2806 return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
2807}
2808
2809static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
2810 .dpms = dce_v8_0_crtc_dpms,
2811 .mode_fixup = dce_v8_0_crtc_mode_fixup,
2812 .mode_set = dce_v8_0_crtc_mode_set,
2813 .mode_set_base = dce_v8_0_crtc_set_base,
2814 .mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
2815 .prepare = dce_v8_0_crtc_prepare,
2816 .commit = dce_v8_0_crtc_commit,
2817 .load_lut = dce_v8_0_crtc_load_lut,
2818 .disable = dce_v8_0_crtc_disable,
2819};
2820
2821static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
2822{
2823 struct amdgpu_crtc *amdgpu_crtc;
2824 int i;
2825
2826 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2827 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2828 if (amdgpu_crtc == NULL)
2829 return -ENOMEM;
2830
2831 drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
2832
2833 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2834 amdgpu_crtc->crtc_id = index;
2835 amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue");
2836 adev->mode_info.crtcs[index] = amdgpu_crtc;
2837
2838 amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
2839 amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
2840 adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2841 adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2842
2843 for (i = 0; i < 256; i++) {
2844 amdgpu_crtc->lut_r[i] = i << 2;
2845 amdgpu_crtc->lut_g[i] = i << 2;
2846 amdgpu_crtc->lut_b[i] = i << 2;
2847 }
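	/* default gamma is a linear identity ramp; the 8-bit index is
	 * widened to the LUT's 10-bit entry range */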
2848
2849 amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2850
2851 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2852 amdgpu_crtc->adjusted_clock = 0;
2853 amdgpu_crtc->encoder = NULL;
2854 amdgpu_crtc->connector = NULL;
2855 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
2856
2857 return 0;
2858}
2859
2860	static int dce_v8_0_early_init(void *handle)
2861	{
2862		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2863
2864 adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
2865 adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
2866
2867 dce_v8_0_set_display_funcs(adev);
2868 dce_v8_0_set_irq_funcs(adev);
2869
2870 switch (adev->asic_type) {
2871 case CHIP_BONAIRE:
2872 case CHIP_HAWAII:
2873 adev->mode_info.num_crtc = 6;
2874 adev->mode_info.num_hpd = 6;
2875 adev->mode_info.num_dig = 6;
2876 break;
2877 case CHIP_KAVERI:
2878 adev->mode_info.num_crtc = 4;
2879 adev->mode_info.num_hpd = 6;
2880 adev->mode_info.num_dig = 7;
2881 break;
2882 case CHIP_KABINI:
2883 case CHIP_MULLINS:
2884 adev->mode_info.num_crtc = 2;
2885 adev->mode_info.num_hpd = 6;
2886 adev->mode_info.num_dig = 6; /* ? */
2887 break;
2888 default:
2889 /* FIXME: not supported yet */
2890 return -EINVAL;
2891 }
2892
2893 return 0;
2894}
2895
2896	static int dce_v8_0_sw_init(void *handle)
2897{
2898 int r, i;
2899	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2900
2901 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2902 r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
2903 if (r)
2904 return r;
2905 }
2906
2907 for (i = 8; i < 20; i += 2) {
2908 r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
2909 if (r)
2910 return r;
2911 }
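	/* pageflip interrupt sources: even src_ids 8..18 map to crtcs D1..D6
	 * (decoded as (src_id - 8) >> 1 in dce_v8_0_pageflip_irq()) */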
2912
2913 /* HPD hotplug */
2914 r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
2915 if (r)
2916 return r;
2917
2918 adev->mode_info.mode_config_initialized = true;
2919
2920 adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2921
2922 adev->ddev->mode_config.max_width = 16384;
2923 adev->ddev->mode_config.max_height = 16384;
2924
2925 adev->ddev->mode_config.preferred_depth = 24;
2926 adev->ddev->mode_config.prefer_shadow = 1;
2927
2928 adev->ddev->mode_config.fb_base = adev->mc.aper_base;
2929
2930 r = amdgpu_modeset_create_props(adev);
2931 if (r)
2932 return r;
2933
2934 adev->ddev->mode_config.max_width = 16384;
2935 adev->ddev->mode_config.max_height = 16384;
2936
2937 /* allocate crtcs */
2938 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2939 r = dce_v8_0_crtc_init(adev, i);
2940 if (r)
2941 return r;
2942 }
2943
2944 if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2945 amdgpu_print_display_setup(adev->ddev);
2946 else
2947 return -EINVAL;
2948
2949 /* setup afmt */
2950 dce_v8_0_afmt_init(adev);
2951
2952 r = dce_v8_0_audio_init(adev);
2953 if (r)
2954 return r;
2955
2956 drm_kms_helper_poll_init(adev->ddev);
2957
2958 return r;
2959}
2960
2961	static int dce_v8_0_sw_fini(void *handle)
2962	{
2963		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2964
2965 kfree(adev->mode_info.bios_hardcoded_edid);
2966
2967 drm_kms_helper_poll_fini(adev->ddev);
2968
2969 dce_v8_0_audio_fini(adev);
2970
2971 dce_v8_0_afmt_fini(adev);
2972
2973 drm_mode_config_cleanup(adev->ddev);
2974 adev->mode_info.mode_config_initialized = false;
2975
2976 return 0;
2977}
2978
2979	static int dce_v8_0_hw_init(void *handle)
2980{
2981 int i;
2982	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2983
2984 /* init dig PHYs, disp eng pll */
2985 amdgpu_atombios_encoder_init_dig(adev);
2986 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2987
2988 /* initialize hpd */
2989 dce_v8_0_hpd_init(adev);
2990
2991 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2992 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2993 }
2994
2995 return 0;
2996}
2997
2998	static int dce_v8_0_hw_fini(void *handle)
2999{
3000 int i;
3001	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3002
3003 dce_v8_0_hpd_fini(adev);
3004
3005 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
3006 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
3007 }
3008
3009 return 0;
3010}
3011
3012	static int dce_v8_0_suspend(void *handle)
3013	{
3014		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3015
3016 amdgpu_atombios_scratch_regs_save(adev);
3017
3018 dce_v8_0_hpd_fini(adev);
3019
3020 return 0;
3021}
3022
3023	static int dce_v8_0_resume(void *handle)
3024	{
3025		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3026
3027 amdgpu_atombios_scratch_regs_restore(adev);
3028
3029 /* init dig PHYs, disp eng pll */
3030 amdgpu_atombios_encoder_init_dig(adev);
3031 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
3032 /* turn on the BL */
3033 if (adev->mode_info.bl_encoder) {
3034 u8 bl_level = amdgpu_display_backlight_get_level(adev,
3035 adev->mode_info.bl_encoder);
3036 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
3037 bl_level);
3038 }
3039
3040 /* initialize hpd */
3041 dce_v8_0_hpd_init(adev);
3042
3043 return 0;
3044}
3045
3046	static bool dce_v8_0_is_idle(void *handle)
3047	{
3048 return true;
3049}
3050
3051	static int dce_v8_0_wait_for_idle(void *handle)
3052	{
3053 return 0;
3054}
3055
3056	static void dce_v8_0_print_status(void *handle)
3057	{
3058		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3059
3060 dev_info(adev->dev, "DCE 8.x registers\n");
3061 /* XXX todo */
3062}
3063
3064	static int dce_v8_0_soft_reset(void *handle)
3065{
3066 u32 srbm_soft_reset = 0, tmp;
3067	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3068
3069 if (dce_v8_0_is_display_hung(adev))
3070 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
3071
3072 if (srbm_soft_reset) {
3073		dce_v8_0_print_status((void *)adev);
3074
3075 tmp = RREG32(mmSRBM_SOFT_RESET);
3076 tmp |= srbm_soft_reset;
3077 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
3078 WREG32(mmSRBM_SOFT_RESET, tmp);
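		/* read back to make sure the reset write reached the hardware */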
3079 tmp = RREG32(mmSRBM_SOFT_RESET);
3080
3081 udelay(50);
3082
3083 tmp &= ~srbm_soft_reset;
3084 WREG32(mmSRBM_SOFT_RESET, tmp);
3085 tmp = RREG32(mmSRBM_SOFT_RESET);
3086
3087 /* Wait a little for things to settle down */
3088 udelay(50);
3089		dce_v8_0_print_status((void *)adev);
3090 }
3091 return 0;
3092}
3093
3094static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
3095 int crtc,
3096 enum amdgpu_interrupt_state state)
3097{
3098 u32 reg_block, lb_interrupt_mask;
3099
3100 if (crtc >= adev->mode_info.num_crtc) {
3101 DRM_DEBUG("invalid crtc %d\n", crtc);
3102 return;
3103 }
3104
3105 switch (crtc) {
3106 case 0:
3107 reg_block = CRTC0_REGISTER_OFFSET;
3108 break;
3109 case 1:
3110 reg_block = CRTC1_REGISTER_OFFSET;
3111 break;
3112 case 2:
3113 reg_block = CRTC2_REGISTER_OFFSET;
3114 break;
3115 case 3:
3116 reg_block = CRTC3_REGISTER_OFFSET;
3117 break;
3118 case 4:
3119 reg_block = CRTC4_REGISTER_OFFSET;
3120 break;
3121 case 5:
3122 reg_block = CRTC5_REGISTER_OFFSET;
3123 break;
3124 default:
3125 DRM_DEBUG("invalid crtc %d\n", crtc);
3126 return;
3127 }
3128
3129 switch (state) {
3130 case AMDGPU_IRQ_STATE_DISABLE:
3131 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3132 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
3133 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3134 break;
3135 case AMDGPU_IRQ_STATE_ENABLE:
3136 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3137 lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
3138 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3139 break;
3140 default:
3141 break;
3142 }
3143}
3144
3145static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
3146 int crtc,
3147 enum amdgpu_interrupt_state state)
3148{
3149 u32 reg_block, lb_interrupt_mask;
3150
3151 if (crtc >= adev->mode_info.num_crtc) {
3152 DRM_DEBUG("invalid crtc %d\n", crtc);
3153 return;
3154 }
3155
3156 switch (crtc) {
3157 case 0:
3158 reg_block = CRTC0_REGISTER_OFFSET;
3159 break;
3160 case 1:
3161 reg_block = CRTC1_REGISTER_OFFSET;
3162 break;
3163 case 2:
3164 reg_block = CRTC2_REGISTER_OFFSET;
3165 break;
3166 case 3:
3167 reg_block = CRTC3_REGISTER_OFFSET;
3168 break;
3169 case 4:
3170 reg_block = CRTC4_REGISTER_OFFSET;
3171 break;
3172 case 5:
3173 reg_block = CRTC5_REGISTER_OFFSET;
3174 break;
3175 default:
3176 DRM_DEBUG("invalid crtc %d\n", crtc);
3177 return;
3178 }
3179
3180 switch (state) {
3181 case AMDGPU_IRQ_STATE_DISABLE:
3182 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3183 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
3184 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3185 break;
3186 case AMDGPU_IRQ_STATE_ENABLE:
3187 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3188 lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
3189 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3190 break;
3191 default:
3192 break;
3193 }
3194}
3195
3196static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
3197 struct amdgpu_irq_src *src,
3198 unsigned type,
3199 enum amdgpu_interrupt_state state)
3200{
3201 u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
3202
3203 switch (type) {
3204 case AMDGPU_HPD_1:
3205 dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
3206 break;
3207 case AMDGPU_HPD_2:
3208 dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
3209 break;
3210 case AMDGPU_HPD_3:
3211 dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
3212 break;
3213 case AMDGPU_HPD_4:
3214 dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
3215 break;
3216 case AMDGPU_HPD_5:
3217 dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
3218 break;
3219 case AMDGPU_HPD_6:
3220 dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
3221 break;
3222 default:
3223		DRM_DEBUG("invalid hpd %d\n", type);
3224 return 0;
3225 }
3226
3227 switch (state) {
3228 case AMDGPU_IRQ_STATE_DISABLE:
3229 dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
3230 dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3231 WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
3232 break;
3233 case AMDGPU_IRQ_STATE_ENABLE:
3234 dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
3235 dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3236 WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
3237 break;
3238 default:
3239 break;
3240 }
3241
3242 return 0;
3243}
3244
3245static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
3246 struct amdgpu_irq_src *src,
3247 unsigned type,
3248 enum amdgpu_interrupt_state state)
3249{
3250 switch (type) {
3251 case AMDGPU_CRTC_IRQ_VBLANK1:
3252 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3253 break;
3254 case AMDGPU_CRTC_IRQ_VBLANK2:
3255 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3256 break;
3257 case AMDGPU_CRTC_IRQ_VBLANK3:
3258 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3259 break;
3260 case AMDGPU_CRTC_IRQ_VBLANK4:
3261 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3262 break;
3263 case AMDGPU_CRTC_IRQ_VBLANK5:
3264 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3265 break;
3266 case AMDGPU_CRTC_IRQ_VBLANK6:
3267 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3268 break;
3269 case AMDGPU_CRTC_IRQ_VLINE1:
3270 dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
3271 break;
3272 case AMDGPU_CRTC_IRQ_VLINE2:
3273 dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
3274 break;
3275 case AMDGPU_CRTC_IRQ_VLINE3:
3276 dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
3277 break;
3278 case AMDGPU_CRTC_IRQ_VLINE4:
3279 dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
3280 break;
3281 case AMDGPU_CRTC_IRQ_VLINE5:
3282 dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
3283 break;
3284 case AMDGPU_CRTC_IRQ_VLINE6:
3285 dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
3286 break;
3287 default:
3288 break;
3289 }
3290 return 0;
3291}
3292
3293static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3294 struct amdgpu_irq_src *source,
3295 struct amdgpu_iv_entry *entry)
3296{
3297 unsigned crtc = entry->src_id - 1;
3298 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3299 unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
3300
3301 switch (entry->src_data) {
3302 case 0: /* vblank */
3303		if (disp_int & interrupt_status_offsets[crtc].vblank)
3304			WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
3305 else
3306 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3307
3308 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3309 drm_handle_vblank(adev->ddev, crtc);
3310		}
3311 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3312
3313 break;
3314 case 1: /* vline */
3315		if (disp_int & interrupt_status_offsets[crtc].vline)
3316			WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
3317 else
3318 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3319
3320 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3321
3322 break;
3323 default:
3324 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3325 break;
3326 }
3327
3328 return 0;
3329}
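
/*
 * Note on the decoding above: in the CIK interrupt vector layout,
 * D1..D6 report src_id 1..6 and src_data selects vblank (0) or
 * vline (1), hence crtc = src_id - 1. A hypothetical bounds check
 * (not present in this version) would keep a malformed IV entry from
 * indexing past interrupt_status_offsets[]:
 *
 *	if (entry->src_id < 1 || entry->src_id > adev->mode_info.num_crtc)
 *		return 0;
 */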
3330
3331static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3332 struct amdgpu_irq_src *src,
3333 unsigned type,
3334 enum amdgpu_interrupt_state state)
3335{
3336 u32 reg, reg_block;
3337 /* now deal with page flip IRQ */
3338 switch (type) {
3339 case AMDGPU_PAGEFLIP_IRQ_D1:
3340 reg_block = CRTC0_REGISTER_OFFSET;
3341 break;
3342 case AMDGPU_PAGEFLIP_IRQ_D2:
3343 reg_block = CRTC1_REGISTER_OFFSET;
3344 break;
3345 case AMDGPU_PAGEFLIP_IRQ_D3:
3346 reg_block = CRTC2_REGISTER_OFFSET;
3347 break;
3348 case AMDGPU_PAGEFLIP_IRQ_D4:
3349 reg_block = CRTC3_REGISTER_OFFSET;
3350 break;
3351 case AMDGPU_PAGEFLIP_IRQ_D5:
3352 reg_block = CRTC4_REGISTER_OFFSET;
3353 break;
3354 case AMDGPU_PAGEFLIP_IRQ_D6:
3355 reg_block = CRTC5_REGISTER_OFFSET;
3356 break;
3357 default:
3358 DRM_ERROR("invalid pageflip crtc %d\n", type);
3359 return -EINVAL;
3360 }
3361
3362 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block);
3363 if (state == AMDGPU_IRQ_STATE_DISABLE)
3364 WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3365 else
3366 WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3367
3368 return 0;
3369}
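
/*
 * Illustrative equivalent: the six cases above map 1:1 onto the
 * crtc_offsets[] table declared at the top of the file, so, assuming
 * the AMDGPU_PAGEFLIP_IRQ_D1..D6 values are consecutive, the lookup
 * could also be written as:
 *
 *	if (type >= AMDGPU_PAGEFLIP_IRQ_D1 && type <= AMDGPU_PAGEFLIP_IRQ_D6)
 *		reg_block = crtc_offsets[type - AMDGPU_PAGEFLIP_IRQ_D1];
 *	else
 *		return -EINVAL;
 */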
3370
3371static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3372 struct amdgpu_irq_src *source,
3373 struct amdgpu_iv_entry *entry)
3374{
3375 int reg_block;
3376 unsigned long flags;
3377 unsigned crtc_id;
3378 struct amdgpu_crtc *amdgpu_crtc;
3379 struct amdgpu_flip_work *works;
3380
3381 crtc_id = (entry->src_id - 8) >> 1;
3382 amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3383
3384 /* ack the interrupt */
3385	switch (crtc_id) {
3386 case AMDGPU_PAGEFLIP_IRQ_D1:
3387 reg_block = CRTC0_REGISTER_OFFSET;
3388 break;
3389 case AMDGPU_PAGEFLIP_IRQ_D2:
3390 reg_block = CRTC1_REGISTER_OFFSET;
3391 break;
3392 case AMDGPU_PAGEFLIP_IRQ_D3:
3393 reg_block = CRTC2_REGISTER_OFFSET;
3394 break;
3395 case AMDGPU_PAGEFLIP_IRQ_D4:
3396 reg_block = CRTC3_REGISTER_OFFSET;
3397 break;
3398 case AMDGPU_PAGEFLIP_IRQ_D5:
3399 reg_block = CRTC4_REGISTER_OFFSET;
3400 break;
3401 case AMDGPU_PAGEFLIP_IRQ_D6:
3402 reg_block = CRTC5_REGISTER_OFFSET;
3403 break;
3404 default:
3405 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3406 return -EINVAL;
3407 }
3408
3409 if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3410 WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3411
3412	/* the IRQ can fire during early init, before the CRTC is set up */
3413 if (amdgpu_crtc == NULL)
3414 return 0;
3415
3416 spin_lock_irqsave(&adev->ddev->event_lock, flags);
3417 works = amdgpu_crtc->pflip_works;
3418	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3419 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3420 "AMDGPU_FLIP_SUBMITTED(%d)\n",
3421 amdgpu_crtc->pflip_status,
3422 AMDGPU_FLIP_SUBMITTED);
3423 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3424 return 0;
3425 }
3426
3427 /* page flip completed. clean up */
3428 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3429 amdgpu_crtc->pflip_works = NULL;
3430
3431	/* wake up userspace */
3432 if (works->event)
3433 drm_send_vblank_event(adev->ddev, crtc_id, works->event);
3434
3435 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3436
3437 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
3438 amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
3439 queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
3440
3441 return 0;
3442}
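
/*
 * Two notes on the handler above. First, page-flip interrupts use two
 * IV source ids per display controller (8/9 for D1 through 18/19 for
 * D6, going by the decoding used here), so (src_id - 8) >> 1 recovers
 * the CRTC index. Second, the ack switch compares crtc_id against the
 * AMDGPU_PAGEFLIP_IRQ_D* labels, which only works because those enum
 * values coincide with the CRTC indices 0..5; made explicit:
 *
 *	if (crtc_id < ARRAY_SIZE(crtc_offsets))
 *		reg_block = crtc_offsets[crtc_id];
 *	else
 *		return -EINVAL;
 */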
3443
3444static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
3445 struct amdgpu_irq_src *source,
3446 struct amdgpu_iv_entry *entry)
3447{
3448 uint32_t disp_int, mask, int_control, tmp;
3449 unsigned hpd;
3450
3451	if (entry->src_data >= adev->mode_info.num_hpd) {
3452 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3453 return 0;
3454 }
3455
3456 hpd = entry->src_data;
3457 disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3458 mask = interrupt_status_offsets[hpd].hpd;
3459 int_control = hpd_int_control_offsets[hpd];
3460
3461 if (disp_int & mask) {
3462 tmp = RREG32(int_control);
3463 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3464 WREG32(int_control, tmp);
3465 schedule_work(&adev->hotplug_work);
3466 DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3467 }
3468
3469 return 0;
3470
3471}
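
/*
 * The handler above only acks the pad and defers the real work:
 * connector re-detection can sleep, so it is pushed to process context
 * via schedule_work(). A minimal sketch of what such a work handler
 * might do (amdgpu wires up its own version elsewhere; this one is
 * purely illustrative):
 */
static void dce_v8_0_hotplug_work_sketch(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, hotplug_work);

	/* re-probe connectors and notify userspace of any change */
	drm_helper_hpd_irq_event(adev->ddev);
}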
3472
3473static int dce_v8_0_set_clockgating_state(void *handle,
3474					  enum amd_clockgating_state state)
3475{
3476 return 0;
3477}
3478
3479static int dce_v8_0_set_powergating_state(void *handle,
3480					  enum amd_powergating_state state)
3481{
3482 return 0;
3483}
3484
3485const struct amd_ip_funcs dce_v8_0_ip_funcs = {
3486 .early_init = dce_v8_0_early_init,
3487 .late_init = NULL,
3488 .sw_init = dce_v8_0_sw_init,
3489 .sw_fini = dce_v8_0_sw_fini,
3490 .hw_init = dce_v8_0_hw_init,
3491 .hw_fini = dce_v8_0_hw_fini,
3492 .suspend = dce_v8_0_suspend,
3493 .resume = dce_v8_0_resume,
3494 .is_idle = dce_v8_0_is_idle,
3495 .wait_for_idle = dce_v8_0_wait_for_idle,
3496 .soft_reset = dce_v8_0_soft_reset,
3497 .print_status = dce_v8_0_print_status,
3498 .set_clockgating_state = dce_v8_0_set_clockgating_state,
3499 .set_powergating_state = dce_v8_0_set_powergating_state,
3500};
3501
3502static void
3503dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
3504 struct drm_display_mode *mode,
3505 struct drm_display_mode *adjusted_mode)
3506{
3507 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3508
3509 amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3510
3511 /* need to call this here rather than in prepare() since we need some crtc info */
3512 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3513
3514	/* setting the scaler clears this on some chips, so set it again here */
3515 dce_v8_0_set_interleave(encoder->crtc, mode);
3516
3517 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3518 dce_v8_0_afmt_enable(encoder, true);
3519 dce_v8_0_afmt_setmode(encoder, adjusted_mode);
3520 }
3521}
3522
3523static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
3524{
3525 struct amdgpu_device *adev = encoder->dev->dev_private;
3526 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3527 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3528
3529 if ((amdgpu_encoder->active_device &
3530 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3531 (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3532 ENCODER_OBJECT_ID_NONE)) {
3533 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3534 if (dig) {
3535 dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
3536 if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3537 dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3538 }
3539 }
3540
3541 amdgpu_atombios_scratch_regs_lock(adev, true);
3542
3543 if (connector) {
3544 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3545
3546 /* select the clock/data port if it uses a router */
3547 if (amdgpu_connector->router.cd_valid)
3548 amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3549
3550 /* turn eDP panel on for mode set */
3551 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3552 amdgpu_atombios_encoder_set_edp_panel_power(connector,
3553 ATOM_TRANSMITTER_ACTION_POWER_ON);
3554 }
3555
3556 /* this is needed for the pll/ss setup to work correctly in some cases */
3557 amdgpu_atombios_encoder_set_crtc_source(encoder);
3558 /* set up the FMT blocks */
3559 dce_v8_0_program_fmt(encoder);
3560}
3561
3562static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
3563{
3564 struct drm_device *dev = encoder->dev;
3565 struct amdgpu_device *adev = dev->dev_private;
3566
3567 /* need to call this here as we need the crtc set up */
3568 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3569 amdgpu_atombios_scratch_regs_lock(adev, false);
3570}
3571
3572static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
3573{
3574 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3575 struct amdgpu_encoder_atom_dig *dig;
3576
3577 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3578
3579 if (amdgpu_atombios_encoder_is_digital(encoder)) {
3580 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3581 dce_v8_0_afmt_enable(encoder, false);
3582 dig = amdgpu_encoder->enc_priv;
3583 dig->dig_encoder = -1;
3584 }
3585 amdgpu_encoder->active_device = 0;
3586}
3587
3588/* these are handled by the primary encoders */
3589static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
3590{
3591
3592}
3593
3594static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
3595{
3596
3597}
3598
3599static void
3600dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
3601 struct drm_display_mode *mode,
3602 struct drm_display_mode *adjusted_mode)
3603{
3604
3605}
3606
3607static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
3608{
3609
3610}
3611
3612static void
3613dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
3614{
3615
3616}
3617
3618static bool dce_v8_0_ext_mode_fixup(struct drm_encoder *encoder,
3619 const struct drm_display_mode *mode,
3620 struct drm_display_mode *adjusted_mode)
3621{
3622 return true;
3623}
3624
3625static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
3626 .dpms = dce_v8_0_ext_dpms,
3627 .mode_fixup = dce_v8_0_ext_mode_fixup,
3628 .prepare = dce_v8_0_ext_prepare,
3629 .mode_set = dce_v8_0_ext_mode_set,
3630 .commit = dce_v8_0_ext_commit,
3631 .disable = dce_v8_0_ext_disable,
3632 /* no detect for TMDS/LVDS yet */
3633};
3634
3635static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
3636 .dpms = amdgpu_atombios_encoder_dpms,
3637 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3638 .prepare = dce_v8_0_encoder_prepare,
3639 .mode_set = dce_v8_0_encoder_mode_set,
3640 .commit = dce_v8_0_encoder_commit,
3641 .disable = dce_v8_0_encoder_disable,
3642 .detect = amdgpu_atombios_encoder_dig_detect,
3643};
3644
3645static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
3646 .dpms = amdgpu_atombios_encoder_dpms,
3647 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3648 .prepare = dce_v8_0_encoder_prepare,
3649 .mode_set = dce_v8_0_encoder_mode_set,
3650 .commit = dce_v8_0_encoder_commit,
3651 .detect = amdgpu_atombios_encoder_dac_detect,
3652};
3653
3654static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
3655{
3656 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3657 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3658 amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3659 kfree(amdgpu_encoder->enc_priv);
3660 drm_encoder_cleanup(encoder);
3661 kfree(amdgpu_encoder);
3662}
3663
3664static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
3665 .destroy = dce_v8_0_encoder_destroy,
3666};
3667
3668static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3669 uint32_t encoder_enum,
3670 uint32_t supported_device,
3671 u16 caps)
3672{
3673 struct drm_device *dev = adev->ddev;
3674 struct drm_encoder *encoder;
3675 struct amdgpu_encoder *amdgpu_encoder;
3676
3677 /* see if we already added it */
3678 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3679 amdgpu_encoder = to_amdgpu_encoder(encoder);
3680 if (amdgpu_encoder->encoder_enum == encoder_enum) {
3681 amdgpu_encoder->devices |= supported_device;
3682 return;
3683 }
3684
3685 }
3686
3687 /* add a new one */
3688 amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3689 if (!amdgpu_encoder)
3690 return;
3691
3692 encoder = &amdgpu_encoder->base;
3693 switch (adev->mode_info.num_crtc) {
3694 case 1:
3695 encoder->possible_crtcs = 0x1;
3696 break;
3697 case 2:
3698 default:
3699 encoder->possible_crtcs = 0x3;
3700 break;
3701 case 4:
3702 encoder->possible_crtcs = 0xf;
3703 break;
3704 case 6:
3705 encoder->possible_crtcs = 0x3f;
3706 break;
3707 }
3708
3709 amdgpu_encoder->enc_priv = NULL;
3710
3711 amdgpu_encoder->encoder_enum = encoder_enum;
3712 amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3713 amdgpu_encoder->devices = supported_device;
3714 amdgpu_encoder->rmx_type = RMX_OFF;
3715 amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3716 amdgpu_encoder->is_ext_encoder = false;
3717 amdgpu_encoder->caps = caps;
3718
3719 switch (amdgpu_encoder->encoder_id) {
3720 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3721 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3722 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3723 DRM_MODE_ENCODER_DAC);
3724 drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
3725 break;
3726 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3727 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3728 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3729 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3730 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3731 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3732 amdgpu_encoder->rmx_type = RMX_FULL;
3733 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3734 DRM_MODE_ENCODER_LVDS);
3735 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3736 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3737 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3738 DRM_MODE_ENCODER_DAC);
3739 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3740 } else {
3741 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3742 DRM_MODE_ENCODER_TMDS);
3743 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3744 }
3745 drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
3746 break;
3747 case ENCODER_OBJECT_ID_SI170B:
3748 case ENCODER_OBJECT_ID_CH7303:
3749 case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3750 case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3751 case ENCODER_OBJECT_ID_TITFP513:
3752 case ENCODER_OBJECT_ID_VT1623:
3753 case ENCODER_OBJECT_ID_HDMI_SI1930:
3754 case ENCODER_OBJECT_ID_TRAVIS:
3755 case ENCODER_OBJECT_ID_NUTMEG:
3756 /* these are handled by the primary encoders */
3757 amdgpu_encoder->is_ext_encoder = true;
3758 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3759 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3760 DRM_MODE_ENCODER_LVDS);
3761 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3762 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3763 DRM_MODE_ENCODER_DAC);
3764 else
3765 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3766 DRM_MODE_ENCODER_TMDS);
3767 drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
3768 break;
3769 }
3770}
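
/*
 * For reference, possible_crtcs is a bitmask with bit i set when CRTC i
 * may drive this encoder, so for the counts the switch above handles
 * explicitly (1, 2, 4, 6) it is equivalent to:
 *
 *	encoder->possible_crtcs = (1 << adev->mode_info.num_crtc) - 1;
 */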
3771
3772static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
3773 .set_vga_render_state = &dce_v8_0_set_vga_render_state,
3774 .bandwidth_update = &dce_v8_0_bandwidth_update,
3775 .vblank_get_counter = &dce_v8_0_vblank_get_counter,
3776 .vblank_wait = &dce_v8_0_vblank_wait,
3777 .is_display_hung = &dce_v8_0_is_display_hung,
3778 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3779 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3780 .hpd_sense = &dce_v8_0_hpd_sense,
3781 .hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
3782 .hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
3783 .page_flip = &dce_v8_0_page_flip,
3784 .page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
3785 .add_encoder = &dce_v8_0_encoder_add,
3786 .add_connector = &amdgpu_connector_add,
3787 .stop_mc_access = &dce_v8_0_stop_mc_access,
3788 .resume_mc_access = &dce_v8_0_resume_mc_access,
3789};
3790
3791static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
3792{
3793 if (adev->mode_info.funcs == NULL)
3794 adev->mode_info.funcs = &dce_v8_0_display_funcs;
3795}
3796
3797static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
3798 .set = dce_v8_0_set_crtc_interrupt_state,
3799 .process = dce_v8_0_crtc_irq,
3800};
3801
3802static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
3803 .set = dce_v8_0_set_pageflip_interrupt_state,
3804 .process = dce_v8_0_pageflip_irq,
3805};
3806
3807static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
3808 .set = dce_v8_0_set_hpd_interrupt_state,
3809 .process = dce_v8_0_hpd_irq,
3810};
3811
3812static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
3813{
3814 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
3815 adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
3816
3817 adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
3818 adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
3819
3820 adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
3821 adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
3822}
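
/*
 * Usage sketch: once these tables are installed, the amdgpu IRQ core
 * drives them. amdgpu_irq_get()/amdgpu_irq_put() reference-count a
 * source and land in the .set hook, while dispatched vectors reach the
 * .process hook. Mirroring the amdgpu_irq_put() call in the page-flip
 * handler above:
 *
 *	amdgpu_irq_get(adev, &adev->crtc_irq, AMDGPU_CRTC_IRQ_VBLANK1);
 */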