/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "si/si_reg.h"
#include "si/sid.h"

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;
} interrupt_status_offsets[6] = { {
	.reg = DISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = DISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = DISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = DISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = DISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = DISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
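
/*
 * Usage sketch for the table above (the interrupt handler itself sits
 * outside this excerpt, so the exact call site is an assumption): for
 * crtc n, read interrupt_status_offsets[n].reg and test the .vblank,
 * .vline or .hpd mask to see which display interrupt is pending, e.g.
 *
 *   if (RREG32(interrupt_status_offsets[1].reg) &
 *       interrupt_status_offsets[1].vblank)
 *           ... D2 vblank interrupt is pending ...
 */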

static const uint32_t hpd_int_control_offsets[6] = {
	DC_HPD1_INT_CONTROL,
	DC_HPD2_INT_CONTROL,
	DC_HPD3_INT_CONTROL,
	DC_HPD4_INT_CONTROL,
	DC_HPD5_INT_CONTROL,
	DC_HPD6_INT_CONTROL,
};

static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	DRM_INFO("xxxx: dce_v6_0_audio_endpt_rreg ----no impl!!!!\n");
	return 0;
}

static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	DRM_INFO("xxxx: dce_v6_0_audio_endpt_wreg ----no impl!!!!\n");
}

static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
	return !!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) &
		  EVERGREEN_CRTC_V_BLANK);
}

static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return pos1 != pos2;
}

/**
 * dce_v6_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	unsigned i = 100;

	if (crtc >= adev->mode_info.num_crtc)
		return;

	if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce_v6_0_is_in_vblank(adev, crtc)) {
		if (i++ == 100) {
			i = 0;
			if (!dce_v6_0_is_counter_moving(adev, crtc))
				break;
		}
	}

	while (!dce_v6_0_is_in_vblank(adev, crtc)) {
		if (i++ == 100) {
			i = 0;
			if (!dce_v6_0_is_counter_moving(adev, crtc))
				break;
		}
	}
}

static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v6_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Does the actual pageflip (evergreen+).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
 */
static void dce_v6_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	/* flip at hsync for async, default is vsync */
	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);

	/* post the write */
	RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v6_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case AMDGPU_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}

/**
 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v6_0_hpd_sense(adev, hpd);

	switch (hpd) {
	case AMDGPU_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

/**
 * dce_v6_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS; this avoids
			 * breaking the aux dp channel on imac and helps (but
			 * does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * and also avoids interrupt storms during dpms.
			 */
			continue;
		}
		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			break;
		case AMDGPU_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			break;
		case AMDGPU_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			break;
		case AMDGPU_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			break;
		case AMDGPU_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			break;
		case AMDGPU_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			break;
		default:
			break;
		}
		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

/**
 * dce_v6_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			break;
		case AMDGPU_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			break;
		case AMDGPU_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			break;
		case AMDGPU_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			break;
		case AMDGPU_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			break;
		case AMDGPU_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			break;
		default:
			break;
		}
		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return SI_DC_GPIO_HPD_A;
}

static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
{
	DRM_INFO("xxxx: dce_v6_0_is_display_hung ----no imp!!!!!\n");

	return true;
}

static u32 evergreen_get_vblank_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
				    struct amdgpu_mode_mc_save *save)
{
	u32 crtc_enabled, tmp, frame_count;
	int i, j;

	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);

	/* disable VGA render */
	WREG32(VGA_RENDER_CONTROL, 0);

	/* blank the display controllers */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
		if (crtc_enabled) {
			save->crtc_enabled[i] = true;
			tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);

			if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
				dce_v6_0_vblank_wait(adev, i);
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = evergreen_get_vblank_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (evergreen_get_vblank_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}

			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
			tmp &= ~EVERGREEN_CRTC_MASTER_EN;
			WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
		} else {
			save->crtc_enabled[i] = false;
		}
	}
}

static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
				      struct amdgpu_mode_mc_save *save)
{
	u32 tmp;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);
	}

	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);

	/* unlock regs and wait for update */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
			if ((tmp & 0x7) != 3) {
				tmp &= ~0x7;
				tmp |= 0x3;
				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
			if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
				tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (tmp & 1) {
				tmp &= ~1;
				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
			for (j = 0; j < adev->usec_timeout; j++) {
				tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
				if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
					break;
				udelay(1);
			}
		}
	}

	/* Unlock vga access */
	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}

static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	if (!render)
		WREG32(R_000300_VGA_RENDER_CONTROL,
		       RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
}

static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
				FMT_SPATIAL_DITHER_EN);
		else
			tmp |= FMT_TRUNCATE_EN;
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
				FMT_RGB_RANDOM_ENABLE |
				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
		else
			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(FMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/**
 * si_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (SI).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

/**
 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
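
/*
 * Worked example for the fixed-point math above (illustrative numbers,
 * not from any particular board): with wm->yclk = 1000000 (1 GHz per
 * pin, in kHz) and wm->dram_channels = 2, the effective bus width is
 * 2 * 4 = 8 bytes, so the raw bandwidth is
 *
 *   (1000000 / 1000) * 8 * 0.7 = 5600 MBytes/s
 *
 * where 0.7 is the DRAM efficiency factor encoded as 7/10 in fixed20_12
 * arithmetic.
 */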

/**
 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}
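
/*
 * Illustrative comparison of the three limits above (assumed example
 * clocks): with yclk = 1 GHz over 2 channels the DRAM limit is
 * ~5600 MBytes/s; with sclk = 800 MHz the data return limit is
 * 32 * 800 * 0.8 = 20480 MBytes/s; with disp_clk = 600 MHz the DMIF
 * limit is 32 * 600 * 0.8 = 15360 MBytes/s. min() then picks the
 * tightest of the three, here the DRAM limit of 5600 MBytes/s.
 */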

/**
 * dce_v6_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}
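
/*
 * Example of the average-bandwidth formula above, with assumed mode
 * numbers: a 1920-wide source at 4 bytes/pixel, vsc = 1.0 and a total
 * line time (active + blank) of 16000 ns gives
 *
 *   1920 * 4 * 1.0 / (16000 / 1000) = 480 MBytes/s
 *
 * i.e. one line's worth of bytes divided by the line period in us.
 */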

/**
 * dce_v6_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (SI).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
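
/*
 * Summary of the latency model implemented above (restating the code,
 * not adding to it): the base watermark is
 *
 *   latency = mc_latency + dc_latency
 *           + (num_heads + 1) * worst_chunk_return_time
 *           + num_heads * cursor_line_pair_return_time
 *
 * and it is padded by (line_fill_time - active_time) whenever lb_fill_bw
 * (the smaller of the per-head share of the available bandwidth, the
 * dmif-size derived limit, and disp_clk * bytes_per_pixel) cannot refill
 * a line within the active display time.
 */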

/**
 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	return dce_v6_0_average_bandwidth(wm) <=
		(dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads);
}

/**
 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
	return dce_v6_0_average_bandwidth(wm) <=
		(dce_v6_0_available_bandwidth(wm) / wm->num_heads);
}

/**
 * dce_v6_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full) {
		latency_tolerant_lines = 1;
	} else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	return dce_v6_0_latency_watermark(wm) <= latency_hiding;
}
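
/*
 * Worked example for the check above (assumed numbers): a 1920-wide
 * source with the half-lb allocation of 4096 * 2 bytes gives
 * lb_partitions = 8192 / 1920 = 4; with vsc <= 1 and vtaps = 2,
 * lb_partitions > vtaps + 1 so latency_tolerant_lines = 2. For a
 * 16000 ns line time and 3000 ns blank time the tolerated latency is
 * 2 * 16000 + 3000 = 35000 ns, so any watermark at or below that passes.
 */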

/**
 * dce_v6_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (SI).
 */
static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce6_wm_params wm_low, wm_high;
	u32 dram_channels;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		dram_channels = si_get_number_of_dram_channels(adev);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = dram_channels;
		wm_high.num_heads = num_heads;

		if (adev->pm.dpm_enabled) {
			/* watermark for low clocks */
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = dram_channels;
		wm_low.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
		/* set for low clocks */
		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v6_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v6_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
}
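
/*
 * The fixed-point chain above reduces, in plain math, to
 *
 *   priority_mark = latency_watermark(ns) * pixel_clock(MHz) * hsc
 *                   / 1000 / 16
 *
 * i.e. the latency watermark converted from time into 16-pixel request
 * units at the current pixel rate; fixed20_12 is used only to avoid
 * floating point in the kernel (worked restatement, not new behavior).
 */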

/* watermark setup */
static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode,
				       struct drm_display_mode *other_mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers. The partitioning is done via one of four
	 * preset allocations specified in bits 21:20, of which only two are
	 * used here:
	 * 0 - half lb
	 * 2 - whole lb, other crtc must be disabled
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs. Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (other_mode) {
			tmp = 0; /* 1/2 */
			buffer_alloc = 1;
		} else {
			tmp = 2; /* whole */
			buffer_alloc = 2;
		}
	} else {
		tmp = 0;
		buffer_alloc = 0;
	}

	WREG32(DC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
	       DC_LB_MEMORY_CONFIG(tmp));

	WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    DMIF_BUFFERS_ALLOCATED_COMPLETED)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 2:
			return 8192 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
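
/*
 * Example of how the pairing works out (hypothetical two-display setup):
 * crtc0 and crtc1 share one line buffer. If both drive a mode, each gets
 * the 1/2 allocation and a 4096 * 2 byte lb_size; if crtc1 is disabled,
 * crtc0 takes the whole buffer and 8192 * 2 bytes. This is why large
 * displays are best placed on non-linked crtcs, as the comment above
 * notes.
 */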

/**
 * dce_v6_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (SI).
 */
static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	if (!adev->mode_info.mode_config_initialized)
		return;

	amdgpu_update_display_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
		mode0 = &adev->mode_info.crtcs[i]->base.mode;
		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}
/*
static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 offset, tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
					 AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v6_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->offset;

	WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
	       AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
}

static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	DRM_INFO("xxxx: dce_v6_0_audio_write_latency_fields---no imp!!!!!\n");
}

static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	DRM_INFO("xxxx: dce_v6_0_audio_write_speaker_allocation---no imp!!!!!\n");
}

static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	DRM_INFO("xxxx: dce_v6_0_audio_write_sad_regs---no imp!!!!!\n");
}
*/
static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	DRM_INFO("xxxx: dce_v6_0_audio_enable---no imp!!!!!\n");
}

static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};

static int dce_v6_0_audio_init(struct amdgpu_device *adev)
{
	return 0;
}

static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
}

/*
static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	DRM_INFO("xxxx: dce_v6_0_afmt_update_ACR---no imp!!!!!\n");
}
*/
/*
 * build a HDMI Video Info Frame
 */
/*
static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
					       void *buffer, size_t size)
{
	DRM_INFO("xxxx: dce_v6_0_afmt_update_avi_infoframe---no imp!!!!!\n");
}

static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	DRM_INFO("xxxx: dce_v6_0_audio_set_dto---no imp!!!!!\n");
}
*/
/*
 * update the info frames with the data from the current display mode
 */
static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	DRM_INFO("xxxx: dce_v6_0_afmt_setmode ----no impl !!!!!!!!\n");
}

static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (enable && dig->afmt->enabled)
		return;
	if (!enable && !dig->afmt->enabled)
		return;

	if (!enable && dig->afmt->pin) {
		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
		dig->afmt->pin = NULL;
	}

	dig->afmt->enabled = enable;

	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}

static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->mode_info.num_dig; i++)
		adev->mode_info.afmt[i] = NULL;

	/* DCE6 has audio blocks tied to DIG encoders */
	for (i = 0; i < adev->mode_info.num_dig; i++) {
		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
		if (adev->mode_info.afmt[i]) {
			adev->mode_info.afmt[i]->offset = dig_offsets[i];
			adev->mode_info.afmt[i]->id = i;
		} else {
			for (j = 0; j < i; j++) {
				kfree(adev->mode_info.afmt[j]);
				adev->mode_info.afmt[j] = NULL;
			}
			DRM_ERROR("Out of memory allocating afmt table\n");
			return -ENOMEM;
		}
	}
	return 0;
}

static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->mode_info.num_dig; i++) {
		kfree(adev->mode_info.afmt[i]);
		adev->mode_info.afmt[i] = NULL;
	}
}

static const u32 vga_control_regs[6] =
{
	AVIVO_D1VGA_CONTROL,
	AVIVO_D2VGA_CONTROL,
	EVERGREEN_D3VGA_CONTROL,
	EVERGREEN_D4VGA_CONTROL,
	EVERGREEN_D5VGA_CONTROL,
	EVERGREEN_D6VGA_CONTROL,
};

static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 vga_control;

	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
	if (enable)
		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
	else
		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
}

static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;

	if (enable)
		WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
	else
		WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
}

static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y, int atomic)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_framebuffer *amdgpu_fb;
	struct drm_framebuffer *target_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *rbo;
	uint64_t fb_location, tiling_flags;
	uint32_t fb_format, fb_pitch_pixels, pipe_config;
	u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
	u32 viewport_w, viewport_h;
	int r;
	bool bypass_lut = false;

	/* no fb bound */
	if (!atomic && !crtc->primary->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	if (atomic) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		target_fb = fb;
	} else {
		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		target_fb = crtc->primary->fb;
	}

	/* If atomic, assume fb object is pinned & idle & fenced and
	 * just update base pointers
	 */
	obj = amdgpu_fb->obj;
	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;

	if (atomic)
		fb_location = amdgpu_bo_gpu_offset(rbo);
	else {
		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
		if (unlikely(r != 0)) {
			amdgpu_bo_unreserve(rbo);
			return -EINVAL;
		}
	}

	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
	amdgpu_bo_unreserve(rbo);

	switch (target_fb->pixel_format) {
	case DRM_FORMAT_C8:
		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
		break;
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_ARGB4444:
		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
#ifdef __BIG_ENDIAN
		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
#ifdef __BIG_ENDIAN
		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_BGRA5551:
		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
#ifdef __BIG_ENDIAN
		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_RGB565:
		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
#ifdef __BIG_ENDIAN
		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
#ifdef __BIG_ENDIAN
		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
#endif
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
#ifdef __BIG_ENDIAN
		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRA1010102:
		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
#ifdef __BIG_ENDIAN
		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(target_fb->pixel_format));
		return -EINVAL;
	}

	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
		unsigned bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
		fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
		fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
		fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
		fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1)
		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);

	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
	fb_format |= SI_GRPH_PIPE_CONFIG(pipe_config);

	dce_v6_0_vga_enable(crtc, false);

	/* Make sure surface address is updated at vertical blank rather than
	 * horizontal blank
	 */
	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
	WREG32(EVERGREEN_GRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
	WREG32(EVERGREEN_GRPH_SWAP_CONTROL + amdgpu_crtc->crtc_offset, fb_swap);

	/*
	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
	 * retain the full precision throughout the pipeline.
	 */
	WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
		 (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
		 ~EVERGREEN_LUT_10BIT_BYPASS_EN);

	if (bypass_lut)
		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");

	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_GRPH_X_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_GRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_GRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
	WREG32(EVERGREEN_GRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);

	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
	WREG32(EVERGREEN_GRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);

	dce_v6_0_grph_enable(crtc, true);

	WREG32(EVERGREEN_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
	       target_fb->height);
	x &= ~3;
	y &= ~1;
	WREG32(EVERGREEN_VIEWPORT_START + amdgpu_crtc->crtc_offset,
	       (x << 16) | y);
	viewport_w = crtc->mode.hdisplay;
	viewport_h = (crtc->mode.vdisplay + 1) & ~1;

	WREG32(EVERGREEN_VIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
	       (viewport_w << 16) | viewport_h);

	/* set pageflip to happen anywhere in vblank interval */
	WREG32(EVERGREEN_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);

	if (!atomic && fb && fb != crtc->primary->fb) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(rbo, false);
		if (unlikely(r != 0))
			return r;
		amdgpu_bo_unpin(rbo);
		amdgpu_bo_unreserve(rbo);
	}

	/* Bytes per pixel may have changed */
	dce_v6_0_bandwidth_update(adev);

	return 0;
}

static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
				    struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset,
		       EVERGREEN_INTERLEAVE_EN);
	else
		WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
}

1730static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
1731{
1732
1733 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1734 struct drm_device *dev = crtc->dev;
1735 struct amdgpu_device *adev = dev->dev_private;
1736 int i;
1737
1738 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
1739
1740 WREG32(NI_INPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
1741 (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
1742 NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
1743 WREG32(NI_PRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
1744 NI_GRPH_PRESCALE_BYPASS);
1745 WREG32(NI_PRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
1746 NI_OVL_PRESCALE_BYPASS);
1747 WREG32(NI_INPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1748 (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
1749 NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
1750
1751
1752
1753 WREG32(EVERGREEN_DC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
1754
1755 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
1756 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
1757 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
1758
1759 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
1760 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
1761 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
1762
1763 WREG32(EVERGREEN_DC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
1764 WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
1765
1766 WREG32(EVERGREEN_DC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
1767 for (i = 0; i < 256; i++) {
1768 WREG32(EVERGREEN_DC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
1769 (amdgpu_crtc->lut_r[i] << 20) |
1770 (amdgpu_crtc->lut_g[i] << 10) |
1771 (amdgpu_crtc->lut_b[i] << 0));
1772 }
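	/*
	 * Each write above packs one palette entry into a single 30-bit
	 * word: red in bits 29:20, green in 19:10, blue in 9:0. With the
	 * 10-bit components stored in lut_r/g/b, full white is
	 * (0x3ff << 20) | (0x3ff << 10) | 0x3ff = 0x3fffffff.
	 */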
1773
1774 WREG32(NI_DEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1775 (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
1776 NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
1777 NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
1778 NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
1779 WREG32(NI_GAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
1780 (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
1781 NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
1782 WREG32(NI_REGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
1783 (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
1784 NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
1785 WREG32(NI_OUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
1786 (NI_OUTPUT_CSC_GRPH_MODE(0) |
1787 NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
1788 /* XXX match this to the depth of the crtc fmt block, move to modeset? */
1789 WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
1790
1791
1792}
1793
1794static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
1795{
1796 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1797 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1798
1799 switch (amdgpu_encoder->encoder_id) {
1800 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1801 if (dig->linkb)
1802 return 1;
1803 else
1804 return 0;
1806 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1807 if (dig->linkb)
1808 return 3;
1809 else
1810 return 2;
1812 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1813 if (dig->linkb)
1814 return 5;
1815 else
1816 return 4;
1818 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
1819 return 6;
1821 default:
1822 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
1823 return 0;
1824 }
1825}
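/*
 * Summary of the mapping above: each UNIPHY block drives two DIG
 * encoders, link A on the even index and link B on the odd one:
 * UNIPHY -> DIG0/DIG1, UNIPHY1 -> DIG2/DIG3, UNIPHY2 -> DIG4/DIG5,
 * and UNIPHY3 always -> DIG6 (linkb is not consulted there).
 */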
1826
1827/**
1828 * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
1829 *
1830 * @crtc: drm crtc
1831 *
1832 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
1833 * a single PPLL can be used for all DP crtcs/encoders. For non-DP
1834 * monitors a dedicated PPLL must be used. If a particular board has
1835 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
1836 * as there is no need to program the PLL itself. If we are not able to
1837 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
1838 * avoid messing up an existing monitor.
1841 */
1842static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
1843{
1844 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1845 struct drm_device *dev = crtc->dev;
1846 struct amdgpu_device *adev = dev->dev_private;
1847 u32 pll_in_use;
1848 int pll;
1849
1850 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
1851 if (adev->clock.dp_extclk)
1852 /* skip PPLL programming if using ext clock */
1853 return ATOM_PPLL_INVALID;
1854 else
1855 return ATOM_PPLL0;
1856 } else {
1857 /* use the same PPLL for all monitors with the same clock */
1858 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
1859 if (pll != ATOM_PPLL_INVALID)
1860 return pll;
1861 }
1862
1863 /* PPLL1 and PPLL2 */
1864 pll_in_use = amdgpu_pll_get_use_mask(crtc);
1865 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1866 return ATOM_PPLL2;
1867 if (!(pll_in_use & (1 << ATOM_PPLL1)))
1868 return ATOM_PPLL1;
1869 DRM_ERROR("unable to allocate a PPLL\n");
1870 return ATOM_PPLL_INVALID;
1871}
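/*
 * A minimal sketch of the bitmask test above, assuming
 * amdgpu_pll_get_use_mask() returns a mask with bit N set when PPLL N
 * is already claimed by another crtc:
 *
 *	pll_in_use = BIT(ATOM_PPLL1);		(only PPLL1 is taken)
 *	(pll_in_use & (1 << ATOM_PPLL2)) == 0	(so PPLL2 is handed out)
 */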
1872
1873static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
1874{
1875 struct amdgpu_device *adev = crtc->dev->dev_private;
1876 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1877 uint32_t cur_lock;
1878
1879 cur_lock = RREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset);
1880 if (lock)
1881 cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
1882 else
1883 cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
1884 WREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
1885}
1886
1887static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
1888{
1889 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1890 struct amdgpu_device *adev = crtc->dev->dev_private;
1891
1892 WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
1893 EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
1894 EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
1895
1896
1897}
1898
1899static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
1900{
1901 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1902 struct amdgpu_device *adev = crtc->dev->dev_private;
1903
1904 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1905 upper_32_bits(amdgpu_crtc->cursor_addr));
1906 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1907 lower_32_bits(amdgpu_crtc->cursor_addr));
1908
1909 WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
1910 EVERGREEN_CURSOR_EN |
1911 EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
1912 EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
1913
1914}
1915
1916static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
1917 int x, int y)
1918{
1919 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1920 struct amdgpu_device *adev = crtc->dev->dev_private;
1921 int xorigin = 0, yorigin = 0;
1922
1923 int w = amdgpu_crtc->cursor_width;
1924
1925 /* avivo cursors are offset into the total surface */
1926 x += crtc->x;
1927 y += crtc->y;
1928 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
1929
1930 if (x < 0) {
1931 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
1932 x = 0;
1933 }
1934 if (y < 0) {
1935 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
1936 y = 0;
1937 }
1938
1939 WREG32(EVERGREEN_CUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
1940 WREG32(EVERGREEN_CUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
1941 WREG32(EVERGREEN_CUR_SIZE + amdgpu_crtc->crtc_offset,
1942 ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
1943
1944 amdgpu_crtc->cursor_x = x;
1945 amdgpu_crtc->cursor_y = y;
1946 return 0;
1947}
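/*
 * Worked example of the clamping above: moving the cursor to x = -5
 * yields xorigin = 5 and x = 0, so the hardware hotspot register
 * shifts the cursor image left by 5 pixels while the position stays
 * on screen; max_cursor_width - 1 bounds the shift.
 */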
1948
1949static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
1950 int x, int y)
1951{
1952 int ret;
1953
1954 dce_v6_0_lock_cursor(crtc, true);
1955 ret = dce_v6_0_cursor_move_locked(crtc, x, y);
1956 dce_v6_0_lock_cursor(crtc, false);
1957
1958 return ret;
1959}
1960
1961static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
1962 struct drm_file *file_priv,
1963 uint32_t handle,
1964 uint32_t width,
1965 uint32_t height,
1966 int32_t hot_x,
1967 int32_t hot_y)
1968{
1969 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1970 struct drm_gem_object *obj;
1971 struct amdgpu_bo *aobj;
1972 int ret;
1973
1974 if (!handle) {
1975 /* turn off cursor */
1976 dce_v6_0_hide_cursor(crtc);
1977 obj = NULL;
1978 goto unpin;
1979 }
1980
1981 if ((width > amdgpu_crtc->max_cursor_width) ||
1982 (height > amdgpu_crtc->max_cursor_height)) {
1983 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
1984 return -EINVAL;
1985 }
1986
1987 obj = drm_gem_object_lookup(file_priv, handle);
1988 if (!obj) {
1989 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
1990 return -ENOENT;
1991 }
1992
1993 aobj = gem_to_amdgpu_bo(obj);
1994 ret = amdgpu_bo_reserve(aobj, false);
1995 if (ret != 0) {
1996 drm_gem_object_unreference_unlocked(obj);
1997 return ret;
1998 }
1999
2000 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
2001 amdgpu_bo_unreserve(aobj);
2002 if (ret) {
2003 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2004 drm_gem_object_unreference_unlocked(obj);
2005 return ret;
2006 }
2007
2008 amdgpu_crtc->cursor_width = width;
2009 amdgpu_crtc->cursor_height = height;
2010
2011 dce_v6_0_lock_cursor(crtc, true);
2012
2013 if (hot_x != amdgpu_crtc->cursor_hot_x ||
2014 hot_y != amdgpu_crtc->cursor_hot_y) {
2015 int x, y;
2016
2017 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2018 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2019
2020 dce_v6_0_cursor_move_locked(crtc, x, y);
2021
2022 amdgpu_crtc->cursor_hot_x = hot_x;
2023 amdgpu_crtc->cursor_hot_y = hot_y;
2024 }
2025
2026 dce_v6_0_show_cursor(crtc);
2027 dce_v6_0_lock_cursor(crtc, false);
2028
2029unpin:
2030 if (amdgpu_crtc->cursor_bo) {
2031 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2032 ret = amdgpu_bo_reserve(aobj, false);
2033 if (likely(ret == 0)) {
2034 amdgpu_bo_unpin(aobj);
2035 amdgpu_bo_unreserve(aobj);
2036 }
2037 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
2038 }
2039
2040 amdgpu_crtc->cursor_bo = obj;
2041 return 0;
2042}
2043
2044static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
2045{
2046 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2047
2048 if (amdgpu_crtc->cursor_bo) {
2049 dce_v6_0_lock_cursor(crtc, true);
2050
2051 dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2052 amdgpu_crtc->cursor_y);
2053
2054 dce_v6_0_show_cursor(crtc);
2055
2056 dce_v6_0_lock_cursor(crtc, false);
2057 }
2058}
2059
2060static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2061 u16 *blue, uint32_t size)
2062{
2063 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2064 int i;
2065
2066 /* userspace palettes are always correct as is */
2067 for (i = 0; i < size; i++) {
2068 amdgpu_crtc->lut_r[i] = red[i] >> 6;
2069 amdgpu_crtc->lut_g[i] = green[i] >> 6;
2070 amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2071 }
2072 dce_v6_0_crtc_load_lut(crtc);
2073
2074 return 0;
2075}
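/*
 * The >> 6 above converts the 16-bit per-channel values that userspace
 * passes in to the 10-bit range the hardware LUT stores, e.g.
 * 0xffff >> 6 = 0x3ff, the maximum 10-bit entry later packed by
 * dce_v6_0_crtc_load_lut().
 */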
2076
2077static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2078{
2079 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2080
2081 drm_crtc_cleanup(crtc);
2082 kfree(amdgpu_crtc);
2083}
2084
2085static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2086 .cursor_set2 = dce_v6_0_crtc_cursor_set2,
2087 .cursor_move = dce_v6_0_crtc_cursor_move,
2088 .gamma_set = dce_v6_0_crtc_gamma_set,
2089 .set_config = amdgpu_crtc_set_config,
2090 .destroy = dce_v6_0_crtc_destroy,
2091 .page_flip_target = amdgpu_crtc_page_flip_target,
2092};
2093
2094static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2095{
2096 struct drm_device *dev = crtc->dev;
2097 struct amdgpu_device *adev = dev->dev_private;
2098 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2099 unsigned type;
2100
2101 switch (mode) {
2102 case DRM_MODE_DPMS_ON:
2103 amdgpu_crtc->enabled = true;
2104 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2105 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2106 /* Make sure VBLANK and PFLIP interrupts are still enabled */
2107 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2108 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2109 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2110 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2111 dce_v6_0_crtc_load_lut(crtc);
2112 break;
2113 case DRM_MODE_DPMS_STANDBY:
2114 case DRM_MODE_DPMS_SUSPEND:
2115 case DRM_MODE_DPMS_OFF:
2116 drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
2117 if (amdgpu_crtc->enabled)
2118 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2119 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2120 amdgpu_crtc->enabled = false;
2121 break;
2122 }
2123 /* adjust pm to dpms */
2124 amdgpu_pm_compute_clocks(adev);
2125}
2126
2127static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2128{
2129 /* disable crtc pair power gating before programming */
2130 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2131 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2132 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2133}
2134
2135static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2136{
2137 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2138 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2139}
2140
2141static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2142{
2143
2144 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2145 struct drm_device *dev = crtc->dev;
2146 struct amdgpu_device *adev = dev->dev_private;
2147 struct amdgpu_atom_ss ss;
2148 int i;
2149
2150 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2151 if (crtc->primary->fb) {
2152 int r;
2153 struct amdgpu_framebuffer *amdgpu_fb;
2154 struct amdgpu_bo *rbo;
2155
2156 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2157 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2158 r = amdgpu_bo_reserve(rbo, false);
2159 if (unlikely(r))
2160 DRM_ERROR("failed to reserve rbo before unpin\n");
2161 else {
2162 amdgpu_bo_unpin(rbo);
2163 amdgpu_bo_unreserve(rbo);
2164 }
2165 }
2166 /* disable the GRPH */
2167 dce_v6_0_grph_enable(crtc, false);
2168
2169 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2170
2171 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2172 if (adev->mode_info.crtcs[i] &&
2173 adev->mode_info.crtcs[i]->enabled &&
2174 i != amdgpu_crtc->crtc_id &&
2175 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2176 /* another crtc is still using this pll; don't turn
2177 * off the pll
2178 */
2179 goto done;
2180 }
2181 }
2182
2183 switch (amdgpu_crtc->pll_id) {
2184 case ATOM_PPLL1:
2185 case ATOM_PPLL2:
2186 /* disable the ppll */
2187 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2188 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2189 break;
2190 default:
2191 break;
2192 }
2193done:
2194 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2195 amdgpu_crtc->adjusted_clock = 0;
2196 amdgpu_crtc->encoder = NULL;
2197 amdgpu_crtc->connector = NULL;
2198}
2199
2200static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2201 struct drm_display_mode *mode,
2202 struct drm_display_mode *adjusted_mode,
2203 int x, int y, struct drm_framebuffer *old_fb)
2204{
2205 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2206
2207 if (!amdgpu_crtc->adjusted_clock)
2208 return -EINVAL;
2209
2210 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2211 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2212 dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2213 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2214 amdgpu_atombios_crtc_scaler_setup(crtc);
2215 dce_v6_0_cursor_reset(crtc);
2216 /* update the hw mode for dpm */
2217 amdgpu_crtc->hw_mode = *adjusted_mode;
2218
2219 return 0;
2220}
2221
2222static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2223 const struct drm_display_mode *mode,
2224 struct drm_display_mode *adjusted_mode)
2225{
2226
2227 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2228 struct drm_device *dev = crtc->dev;
2229 struct drm_encoder *encoder;
2230
2231 /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2232 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2233 if (encoder->crtc == crtc) {
2234 amdgpu_crtc->encoder = encoder;
2235 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2236 break;
2237 }
2238 }
2239 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2240 amdgpu_crtc->encoder = NULL;
2241 amdgpu_crtc->connector = NULL;
2242 return false;
2243 }
2244 if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2245 return false;
2246 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2247 return false;
2248 /* pick pll */
2249 amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2250 /* if we can't get a PPLL for a non-DP encoder, fail */
2251 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2252 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2253 return false;
2254
2255 return true;
2256}
2257
2258static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2259 struct drm_framebuffer *old_fb)
2260{
2261 return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2262}
2263
2264static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2265 struct drm_framebuffer *fb,
2266 int x, int y, enum mode_set_atomic state)
2267{
2268 return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
2269}
2270
2271static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2272 .dpms = dce_v6_0_crtc_dpms,
2273 .mode_fixup = dce_v6_0_crtc_mode_fixup,
2274 .mode_set = dce_v6_0_crtc_mode_set,
2275 .mode_set_base = dce_v6_0_crtc_set_base,
2276 .mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
2277 .prepare = dce_v6_0_crtc_prepare,
2278 .commit = dce_v6_0_crtc_commit,
2279 .load_lut = dce_v6_0_crtc_load_lut,
2280 .disable = dce_v6_0_crtc_disable,
2281};
2282
2283static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2284{
2285 struct amdgpu_crtc *amdgpu_crtc;
2286 int i;
2287
2288 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2289 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2290 if (amdgpu_crtc == NULL)
2291 return -ENOMEM;
2292
2293 drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2294
2295 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2296 amdgpu_crtc->crtc_id = index;
2297 adev->mode_info.crtcs[index] = amdgpu_crtc;
2298
2299 amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2300 amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
2301 adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2302 adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2303
2304 for (i = 0; i < 256; i++) {
2305 amdgpu_crtc->lut_r[i] = i << 2;
2306 amdgpu_crtc->lut_g[i] = i << 2;
2307 amdgpu_crtc->lut_b[i] = i << 2;
2308 }
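	/*
	 * i << 2 widens the 8-bit index to the 10-bit LUT range, giving a
	 * linear identity ramp: 0, 4, 8, ..., 255 << 2 = 1020 (0x3fc).
	 */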
2309
2310 amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2311
2312 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2313 amdgpu_crtc->adjusted_clock = 0;
2314 amdgpu_crtc->encoder = NULL;
2315 amdgpu_crtc->connector = NULL;
2316 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2317
2318 return 0;
2319}
2320
2321static int dce_v6_0_early_init(void *handle)
2322{
2323 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2324
2325 adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2326 adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2327
2328 dce_v6_0_set_display_funcs(adev);
2329 dce_v6_0_set_irq_funcs(adev);
2330
2331 switch (adev->asic_type) {
2332 case CHIP_TAHITI:
2333 case CHIP_PITCAIRN:
2334 case CHIP_VERDE:
2335 adev->mode_info.num_crtc = 6;
2336 adev->mode_info.num_hpd = 6;
2337 adev->mode_info.num_dig = 6;
2338 break;
2339 case CHIP_OLAND:
2340 adev->mode_info.num_crtc = 2;
2341 adev->mode_info.num_hpd = 2;
2342 adev->mode_info.num_dig = 2;
2343 break;
2344 default:
2345 /* FIXME: not supported yet */
2346 return -EINVAL;
2347 }
2348
2349 return 0;
2350}
2351
2352static int dce_v6_0_sw_init(void *handle)
2353{
2354 int r, i;
2355 bool ret;
2356 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2357
2358 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2359 r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
2360 if (r)
2361 return r;
2362 }
2363
2364 for (i = 8; i < 20; i += 2) {
2365 r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
2366 if (r)
2367 return r;
2368 }
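	/*
	 * The loop above registers interrupt source ids 8, 10, 12, 14, 16
	 * and 18, one per crtc; dce_v6_0_pageflip_irq() inverts the mapping
	 * with crtc_id = (src_id - 8) >> 1, so src_id 8 -> crtc 0,
	 * src_id 10 -> crtc 1, and so on.
	 */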
2369
2370 /* HPD hotplug */
2371 r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
2372 if (r)
2373 return r;
2374
2375 adev->mode_info.mode_config_initialized = true;
2376
2377 adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2378
2379 adev->ddev->mode_config.async_page_flip = true;
2380
2381 adev->ddev->mode_config.max_width = 16384;
2382 adev->ddev->mode_config.max_height = 16384;
2383
2384 adev->ddev->mode_config.preferred_depth = 24;
2385 adev->ddev->mode_config.prefer_shadow = 1;
2386
2387 adev->ddev->mode_config.fb_base = adev->mc.aper_base;
2388
2389 r = amdgpu_modeset_create_props(adev);
2390 if (r)
2391 return r;
2392
2396 /* allocate crtcs */
2397 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2398 r = dce_v6_0_crtc_init(adev, i);
2399 if (r)
2400 return r;
2401 }
2402
2403 ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
2404 if (ret)
2405 amdgpu_print_display_setup(adev->ddev);
2406 else
2407 return -EINVAL;
2408
2409 /* setup afmt */
2410 r = dce_v6_0_afmt_init(adev);
2411 if (r)
2412 return r;
2413
2414 r = dce_v6_0_audio_init(adev);
2415 if (r)
2416 return r;
2417
2418 drm_kms_helper_poll_init(adev->ddev);
2419
2420 return r;
2421}
2422
2423static int dce_v6_0_sw_fini(void *handle)
2424{
2425 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2426
2427 kfree(adev->mode_info.bios_hardcoded_edid);
2428
2429 drm_kms_helper_poll_fini(adev->ddev);
2430
2431 dce_v6_0_audio_fini(adev);
2432
2433 dce_v6_0_afmt_fini(adev);
2434
2435 drm_mode_config_cleanup(adev->ddev);
2436 adev->mode_info.mode_config_initialized = false;
2437
2438 return 0;
2439}
2440
2441static int dce_v6_0_hw_init(void *handle)
2442{
2443 int i;
2444 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2445
2446 /* init dig PHYs, disp eng pll */
2447 amdgpu_atombios_encoder_init_dig(adev);
2448 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2449
2450 /* initialize hpd */
2451 dce_v6_0_hpd_init(adev);
2452
2453 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2454 dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2455 }
2456
2457 dce_v6_0_pageflip_interrupt_init(adev);
2458
2459 return 0;
2460}
2461
2462static int dce_v6_0_hw_fini(void *handle)
2463{
2464 int i;
2465 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2466
2467 dce_v6_0_hpd_fini(adev);
2468
2469 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2470 dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2471 }
2472
2473 dce_v6_0_pageflip_interrupt_fini(adev);
2474
2475 return 0;
2476}
2477
2478static int dce_v6_0_suspend(void *handle)
2479{
2480 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2481
2482 amdgpu_atombios_scratch_regs_save(adev);
2483
2484 return dce_v6_0_hw_fini(handle);
2485}
2486
2487static int dce_v6_0_resume(void *handle)
2488{
2489 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2490 int ret;
2491
2492 ret = dce_v6_0_hw_init(handle);
2493
2494 amdgpu_atombios_scratch_regs_restore(adev);
2495
2496 /* turn on the BL */
2497 if (adev->mode_info.bl_encoder) {
2498 u8 bl_level = amdgpu_display_backlight_get_level(adev,
2499 adev->mode_info.bl_encoder);
2500 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2501 bl_level);
2502 }
2503
2504 return ret;
2505}
2506
2507static bool dce_v6_0_is_idle(void *handle)
2508{
2509 return true;
2510}
2511
2512static int dce_v6_0_wait_for_idle(void *handle)
2513{
2514 return 0;
2515}
2516
2517static int dce_v6_0_soft_reset(void *handle)
2518{
2519 DRM_INFO("dce_v6_0_soft_reset: not implemented\n");
2520 return 0;
2521}
2522
2523static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2524 int crtc,
2525 enum amdgpu_interrupt_state state)
2526{
2527 u32 reg_block, interrupt_mask;
2528
2529 if (crtc >= adev->mode_info.num_crtc) {
2530 DRM_DEBUG("invalid crtc %d\n", crtc);
2531 return;
2532 }
2533
2534 switch (crtc) {
2535 case 0:
2536 reg_block = SI_CRTC0_REGISTER_OFFSET;
2537 break;
2538 case 1:
2539 reg_block = SI_CRTC1_REGISTER_OFFSET;
2540 break;
2541 case 2:
2542 reg_block = SI_CRTC2_REGISTER_OFFSET;
2543 break;
2544 case 3:
2545 reg_block = SI_CRTC3_REGISTER_OFFSET;
2546 break;
2547 case 4:
2548 reg_block = SI_CRTC4_REGISTER_OFFSET;
2549 break;
2550 case 5:
2551 reg_block = SI_CRTC5_REGISTER_OFFSET;
2552 break;
2553 default:
2554 DRM_DEBUG("invalid crtc %d\n", crtc);
2555 return;
2556 }
2557
2558 switch (state) {
2559 case AMDGPU_IRQ_STATE_DISABLE:
2560 interrupt_mask = RREG32(INT_MASK + reg_block);
2561 interrupt_mask &= ~VBLANK_INT_MASK;
2562 WREG32(INT_MASK + reg_block, interrupt_mask);
2563 break;
2564 case AMDGPU_IRQ_STATE_ENABLE:
2565 interrupt_mask = RREG32(INT_MASK + reg_block);
2566 interrupt_mask |= VBLANK_INT_MASK;
2567 WREG32(INT_MASK + reg_block, interrupt_mask);
2568 break;
2569 default:
2570 break;
2571 }
2572}
2573
2574static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2575 int crtc,
2576 enum amdgpu_interrupt_state state)
2577{
2578
2579}
2580
2581static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2582 struct amdgpu_irq_src *src,
2583 unsigned type,
2584 enum amdgpu_interrupt_state state)
2585{
2586 u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
2587
2588 switch (type) {
2589 case AMDGPU_HPD_1:
2590 dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
2591 break;
2592 case AMDGPU_HPD_2:
2593 dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
2594 break;
2595 case AMDGPU_HPD_3:
2596 dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
2597 break;
2598 case AMDGPU_HPD_4:
2599 dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
2600 break;
2601 case AMDGPU_HPD_5:
2602 dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
2603 break;
2604 case AMDGPU_HPD_6:
2605 dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
2606 break;
2607 default:
2608 DRM_DEBUG("invalid hpd %d\n", type);
2609 return 0;
2610 }
2611
2612 switch (state) {
2613 case AMDGPU_IRQ_STATE_DISABLE:
2614 dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
2615 dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
2616 WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
2617 break;
2618 case AMDGPU_IRQ_STATE_ENABLE:
2619 dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
2620 dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
2621 WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
2622 break;
2623 default:
2624 break;
2625 }
2626
2627 return 0;
2628}
2629
2630static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2631 struct amdgpu_irq_src *src,
2632 unsigned type,
2633 enum amdgpu_interrupt_state state)
2634{
2635 switch (type) {
2636 case AMDGPU_CRTC_IRQ_VBLANK1:
2637 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2638 break;
2639 case AMDGPU_CRTC_IRQ_VBLANK2:
2640 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2641 break;
2642 case AMDGPU_CRTC_IRQ_VBLANK3:
2643 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2644 break;
2645 case AMDGPU_CRTC_IRQ_VBLANK4:
2646 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2647 break;
2648 case AMDGPU_CRTC_IRQ_VBLANK5:
2649 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2650 break;
2651 case AMDGPU_CRTC_IRQ_VBLANK6:
2652 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2653 break;
2654 case AMDGPU_CRTC_IRQ_VLINE1:
2655 dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
2656 break;
2657 case AMDGPU_CRTC_IRQ_VLINE2:
2658 dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
2659 break;
2660 case AMDGPU_CRTC_IRQ_VLINE3:
2661 dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
2662 break;
2663 case AMDGPU_CRTC_IRQ_VLINE4:
2664 dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
2665 break;
2666 case AMDGPU_CRTC_IRQ_VLINE5:
2667 dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
2668 break;
2669 case AMDGPU_CRTC_IRQ_VLINE6:
2670 dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
2671 break;
2672 default:
2673 break;
2674 }
2675 return 0;
2676}
2677
2678static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
2679 struct amdgpu_irq_src *source,
2680 struct amdgpu_iv_entry *entry)
2681{
2682 unsigned crtc = entry->src_id - 1;
2683 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
2684 unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
2685
2686 switch (entry->src_data) {
2687 case 0: /* vblank */
2688 if (disp_int & interrupt_status_offsets[crtc].vblank)
2689 WREG32(VBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
2690 else
2691 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2692
2693 if (amdgpu_irq_enabled(adev, source, irq_type)) {
2694 drm_handle_vblank(adev->ddev, crtc);
2695 }
2696 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
2697 break;
2698 case 1: /* vline */
2699 if (disp_int & interrupt_status_offsets[crtc].vline)
2700 WREG32(VLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
2701 else
2702 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2703
2704 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
2705 break;
2706 default:
2707 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
2708 break;
2709 }
2710
2711 return 0;
2712}
2713
2714static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
2715 struct amdgpu_irq_src *src,
2716 unsigned type,
2717 enum amdgpu_interrupt_state state)
2718{
2719 u32 reg;
2720
2721 if (type >= adev->mode_info.num_crtc) {
2722 DRM_ERROR("invalid pageflip crtc %d\n", type);
2723 return -EINVAL;
2724 }
2725
2726 reg = RREG32(GRPH_INT_CONTROL + crtc_offsets[type]);
2727 if (state == AMDGPU_IRQ_STATE_DISABLE)
2728 WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
2729 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2730 else
2731 WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
2732 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2733
2734 return 0;
2735}
2736
2737static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
2738 struct amdgpu_irq_src *source,
2739 struct amdgpu_iv_entry *entry)
2740{
2741 unsigned long flags;
2742 unsigned crtc_id;
2743 struct amdgpu_crtc *amdgpu_crtc;
2744 struct amdgpu_flip_work *works;
2745
2746 crtc_id = (entry->src_id - 8) >> 1;
2747 amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
2748
2749 if (crtc_id >= adev->mode_info.num_crtc) {
2750 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
2751 return -EINVAL;
2752 }
2753
2754 if (RREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id]) &
2755 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
2756 WREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id],
2757 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
2758
2759 /* the IRQ can fire during initial bring-up, before the crtc is set up */
2760 if (amdgpu_crtc == NULL)
2761 return 0;
2762
2763 spin_lock_irqsave(&adev->ddev->event_lock, flags);
2764 works = amdgpu_crtc->pflip_works;
2765 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
2766 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
2767 "AMDGPU_FLIP_SUBMITTED(%d)\n",
2768 amdgpu_crtc->pflip_status,
2769 AMDGPU_FLIP_SUBMITTED);
2770 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
2771 return 0;
2772 }
2773
2774 /* page flip completed. clean up */
2775 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
2776 amdgpu_crtc->pflip_works = NULL;
2777
2778 /* wake up userspace */
2779 if (works->event)
2780 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
2781
2782 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
2783
2784 drm_crtc_vblank_put(&amdgpu_crtc->base);
2785 schedule_work(&works->unpin_work);
2786
2787 return 0;
2788}
2789
2790static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
2791 struct amdgpu_irq_src *source,
2792 struct amdgpu_iv_entry *entry)
2793{
2794 uint32_t disp_int, mask, int_control, tmp;
2795 unsigned hpd;
2796
2797 if (entry->src_data >= adev->mode_info.num_hpd) {
2798 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
2799 return 0;
2800 }
2801
2802 hpd = entry->src_data;
2803 disp_int = RREG32(interrupt_status_offsets[hpd].reg);
2804 mask = interrupt_status_offsets[hpd].hpd;
2805 int_control = hpd_int_control_offsets[hpd];
2806
2807 if (disp_int & mask) {
2808 tmp = RREG32(int_control);
2809 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
2810 WREG32(int_control, tmp);
2811 schedule_work(&adev->hotplug_work);
2812 DRM_INFO("IH: HPD%d\n", hpd + 1);
2813 }
2814
2815 return 0;
2816
2817}
2818
2819static int dce_v6_0_set_clockgating_state(void *handle,
2820 enum amd_clockgating_state state)
2821{
2822 return 0;
2823}
2824
2825static int dce_v6_0_set_powergating_state(void *handle,
2826 enum amd_powergating_state state)
2827{
2828 return 0;
2829}
2830
2831const struct amd_ip_funcs dce_v6_0_ip_funcs = {
2832 .name = "dce_v6_0",
2833 .early_init = dce_v6_0_early_init,
2834 .late_init = NULL,
2835 .sw_init = dce_v6_0_sw_init,
2836 .sw_fini = dce_v6_0_sw_fini,
2837 .hw_init = dce_v6_0_hw_init,
2838 .hw_fini = dce_v6_0_hw_fini,
2839 .suspend = dce_v6_0_suspend,
2840 .resume = dce_v6_0_resume,
2841 .is_idle = dce_v6_0_is_idle,
2842 .wait_for_idle = dce_v6_0_wait_for_idle,
2843 .soft_reset = dce_v6_0_soft_reset,
2844 .set_clockgating_state = dce_v6_0_set_clockgating_state,
2845 .set_powergating_state = dce_v6_0_set_powergating_state,
2846};
2847
2848static void
2849dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
2850 struct drm_display_mode *mode,
2851 struct drm_display_mode *adjusted_mode)
2852{
2853
2854 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2855
2856 amdgpu_encoder->pixel_clock = adjusted_mode->clock;
2857
2858 /* need to call this here rather than in prepare() since we need some crtc info */
2859 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
2860
2861 /* the scaler setup clears this on some chips */
2862 dce_v6_0_set_interleave(encoder->crtc, mode);
2863
2864 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
2865 dce_v6_0_afmt_enable(encoder, true);
2866 dce_v6_0_afmt_setmode(encoder, adjusted_mode);
2867 }
2868}
2869
2870static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
2871{
2872
2873 struct amdgpu_device *adev = encoder->dev->dev_private;
2874 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2875 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
2876
2877 if ((amdgpu_encoder->active_device &
2878 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
2879 (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
2880 ENCODER_OBJECT_ID_NONE)) {
2881 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2882 if (dig) {
2883 dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
2884 if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
2885 dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
2886 }
2887 }
2888
2889 amdgpu_atombios_scratch_regs_lock(adev, true);
2890
2891 if (connector) {
2892 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
2893
2894 /* select the clock/data port if it uses a router */
2895 if (amdgpu_connector->router.cd_valid)
2896 amdgpu_i2c_router_select_cd_port(amdgpu_connector);
2897
2898 /* turn eDP panel on for mode set */
2899 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
2900 amdgpu_atombios_encoder_set_edp_panel_power(connector,
2901 ATOM_TRANSMITTER_ACTION_POWER_ON);
2902 }
2903
2904 /* this is needed for the pll/ss setup to work correctly in some cases */
2905 amdgpu_atombios_encoder_set_crtc_source(encoder);
2906 /* set up the FMT blocks */
2907 dce_v6_0_program_fmt(encoder);
2908}
2909
2910static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
2911{
2912
2913 struct drm_device *dev = encoder->dev;
2914 struct amdgpu_device *adev = dev->dev_private;
2915
2916 /* need to call this here as we need the crtc set up */
2917 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
2918 amdgpu_atombios_scratch_regs_lock(adev, false);
2919}
2920
2921static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
2922{
2923
2924 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2925 struct amdgpu_encoder_atom_dig *dig;
2926
2927 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
2928
2929 if (amdgpu_atombios_encoder_is_digital(encoder)) {
2930 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
2931 dce_v6_0_afmt_enable(encoder, false);
2932 dig = amdgpu_encoder->enc_priv;
2933 dig->dig_encoder = -1;
2934 }
2935 amdgpu_encoder->active_device = 0;
2936}
2937
2938/* these are handled by the primary encoders */
2939static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
2940{
2941
2942}
2943
2944static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
2945{
2946
2947}
2948
2949static void
2950dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
2951 struct drm_display_mode *mode,
2952 struct drm_display_mode *adjusted_mode)
2953{
2954
2955}
2956
2957static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
2958{
2959
2960}
2961
2962static void
2963dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
2964{
2965
2966}
2967
2968static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
2969 const struct drm_display_mode *mode,
2970 struct drm_display_mode *adjusted_mode)
2971{
2972 return true;
2973}
2974
2975static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
2976 .dpms = dce_v6_0_ext_dpms,
2977 .mode_fixup = dce_v6_0_ext_mode_fixup,
2978 .prepare = dce_v6_0_ext_prepare,
2979 .mode_set = dce_v6_0_ext_mode_set,
2980 .commit = dce_v6_0_ext_commit,
2981 .disable = dce_v6_0_ext_disable,
2982 /* no detect for TMDS/LVDS yet */
2983};
2984
2985static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
2986 .dpms = amdgpu_atombios_encoder_dpms,
2987 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
2988 .prepare = dce_v6_0_encoder_prepare,
2989 .mode_set = dce_v6_0_encoder_mode_set,
2990 .commit = dce_v6_0_encoder_commit,
2991 .disable = dce_v6_0_encoder_disable,
2992 .detect = amdgpu_atombios_encoder_dig_detect,
2993};
2994
2995static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
2996 .dpms = amdgpu_atombios_encoder_dpms,
2997 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
2998 .prepare = dce_v6_0_encoder_prepare,
2999 .mode_set = dce_v6_0_encoder_mode_set,
3000 .commit = dce_v6_0_encoder_commit,
3001 .detect = amdgpu_atombios_encoder_dac_detect,
3002};
3003
3004static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
3005{
3006 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3007 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3008 amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3009 kfree(amdgpu_encoder->enc_priv);
3010 drm_encoder_cleanup(encoder);
3011 kfree(amdgpu_encoder);
3012}
3013
3014static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
3015 .destroy = dce_v6_0_encoder_destroy,
3016};
3017
3018static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
3019 uint32_t encoder_enum,
3020 uint32_t supported_device,
3021 u16 caps)
3022{
3023 struct drm_device *dev = adev->ddev;
3024 struct drm_encoder *encoder;
3025 struct amdgpu_encoder *amdgpu_encoder;
3026
3027 /* see if we already added it */
3028 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3029 amdgpu_encoder = to_amdgpu_encoder(encoder);
3030 if (amdgpu_encoder->encoder_enum == encoder_enum) {
3031 amdgpu_encoder->devices |= supported_device;
3032 return;
3033 }
3034
3035 }
3036
3037 /* add a new one */
3038 amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3039 if (!amdgpu_encoder)
3040 return;
3041
3042 encoder = &amdgpu_encoder->base;
3043 switch (adev->mode_info.num_crtc) {
3044 case 1:
3045 encoder->possible_crtcs = 0x1;
3046 break;
3047 case 2:
3048 default:
3049 encoder->possible_crtcs = 0x3;
3050 break;
3051 case 4:
3052 encoder->possible_crtcs = 0xf;
3053 break;
3054 case 6:
3055 encoder->possible_crtcs = 0x3f;
3056 break;
3057 }
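	/*
	 * possible_crtcs is a bitmask with one bit per crtc the encoder may
	 * drive: num_crtc = 2 gives 0x3 (crtcs 0-1), 4 gives 0xf, and
	 * 6 gives 0x3f (crtcs 0-5).
	 */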
3058
3059 amdgpu_encoder->enc_priv = NULL;
3060
3061 amdgpu_encoder->encoder_enum = encoder_enum;
3062 amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3063 amdgpu_encoder->devices = supported_device;
3064 amdgpu_encoder->rmx_type = RMX_OFF;
3065 amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3066 amdgpu_encoder->is_ext_encoder = false;
3067 amdgpu_encoder->caps = caps;
3068
3069 switch (amdgpu_encoder->encoder_id) {
3070 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3071 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3072 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3073 DRM_MODE_ENCODER_DAC, NULL);
3074 drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
3075 break;
3076 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3077 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3078 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3079 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3080 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3081 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3082 amdgpu_encoder->rmx_type = RMX_FULL;
3083 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3084 DRM_MODE_ENCODER_LVDS, NULL);
3085 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3086 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3087 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3088 DRM_MODE_ENCODER_DAC, NULL);
3089 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3090 } else {
3091 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3092 DRM_MODE_ENCODER_TMDS, NULL);
3093 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3094 }
3095 drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
3096 break;
3097 case ENCODER_OBJECT_ID_SI170B:
3098 case ENCODER_OBJECT_ID_CH7303:
3099 case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3100 case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3101 case ENCODER_OBJECT_ID_TITFP513:
3102 case ENCODER_OBJECT_ID_VT1623:
3103 case ENCODER_OBJECT_ID_HDMI_SI1930:
3104 case ENCODER_OBJECT_ID_TRAVIS:
3105 case ENCODER_OBJECT_ID_NUTMEG:
3106 /* these are handled by the primary encoders */
3107 amdgpu_encoder->is_ext_encoder = true;
3108 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3109 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3110 DRM_MODE_ENCODER_LVDS, NULL);
3111 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3112 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3113 DRM_MODE_ENCODER_DAC, NULL);
3114 else
3115 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3116 DRM_MODE_ENCODER_TMDS, NULL);
3117 drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
3118 break;
3119 }
3120}
3121
3122static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
3123 .set_vga_render_state = &dce_v6_0_set_vga_render_state,
3124 .bandwidth_update = &dce_v6_0_bandwidth_update,
3125 .vblank_get_counter = &dce_v6_0_vblank_get_counter,
3126 .vblank_wait = &dce_v6_0_vblank_wait,
3127 .is_display_hung = &dce_v6_0_is_display_hung,
3128 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3129 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3130 .hpd_sense = &dce_v6_0_hpd_sense,
3131 .hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
3132 .hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
3133 .page_flip = &dce_v6_0_page_flip,
3134 .page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
3135 .add_encoder = &dce_v6_0_encoder_add,
3136 .add_connector = &amdgpu_connector_add,
3137 .stop_mc_access = &dce_v6_0_stop_mc_access,
3138 .resume_mc_access = &dce_v6_0_resume_mc_access,
3139};
3140
3141static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
3142{
3143 if (adev->mode_info.funcs == NULL)
3144 adev->mode_info.funcs = &dce_v6_0_display_funcs;
3145}
3146
3147static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
3148 .set = dce_v6_0_set_crtc_interrupt_state,
3149 .process = dce_v6_0_crtc_irq,
3150};
3151
3152static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
3153 .set = dce_v6_0_set_pageflip_interrupt_state,
3154 .process = dce_v6_0_pageflip_irq,
3155};
3156
3157static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
3158 .set = dce_v6_0_set_hpd_interrupt_state,
3159 .process = dce_v6_0_hpd_irq,
3160};
3161
3162static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
3163{
3164 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
3165 adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
3166
3167 adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
3168 adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
3169
3170 adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
3171 adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
3172}