/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

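/* Note: the PSP-signed DMUB image wraps its instruction/constant payload in
 * this header and footer; dm_dmub_sw_init() and dm_dmub_hw_init() below
 * subtract both when sizing the raw firmware sections.
 */
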
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

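		/* Pack the results into register-style words: the vertical
		 * component sits in the low 16 bits and the horizontal
		 * component in the high 16 bits (same layout for the vblank
		 * start/end pair).
		 */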
		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

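	/* max_size counts pixels of the largest listed mode; the buffer below
	 * is sized at 4 bytes per pixel, presumably for 32bpp scanout.
	 */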
	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
				&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

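	/* The ucode blob is laid out as: PSP signing header, instruction/
	 * constant section, bss/data section, with a PSP footer accounted
	 * for in the size math. The pointer arithmetic below skips the PSP
	 * wrapper so only the raw sections reach the DMUB windows.
	 */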
	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

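	/* Address-unit note: the >> 18 shifts below express addresses in the
	 * 256KB granularity of the MC_VM system aperture registers, >> 24 in
	 * the 16MB granularity of the AGP aperture registers, and >> 12 in
	 * 4KB GPU page units for the GART page table (assumed granularities,
	 * mirrored by the << shifts when filling pa_config).
	 */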
	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address
		 * (add 1) to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif

#ifdef CONFIG_DEBUG_FS
static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
{
	dm->crc_win_x_start_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_X_START", 0, U16_MAX);
	if (!dm->crc_win_x_start_property)
		return -ENOMEM;

	dm->crc_win_y_start_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_Y_START", 0, U16_MAX);
	if (!dm->crc_win_y_start_property)
		return -ENOMEM;

	dm->crc_win_x_end_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_X_END", 0, U16_MAX);
	if (!dm->crc_win_x_end_property)
		return -ENOMEM;

	dm->crc_win_y_end_property =
		drm_property_create_range(adev_to_drm(dm->adev),
					  DRM_MODE_PROP_ATOMIC,
					  "AMD_CRC_WIN_Y_END", 0, U16_MAX);
	if (!dm->crc_win_y_end_property)
		return -ENOMEM;

	return 0;
}
#endif

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
			"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#ifdef CONFIG_DEBUG_FS
	if (create_crtc_crc_properties(&adev->dm))
		DRM_ERROR("amdgpu: failed to create crc property.\n");
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

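	/* The validated DMCU image carries two loadable pieces, the ERAM code
	 * section and an interrupt-vector section, split below using
	 * hdr->intv_size_bytes; both are registered with the PSP loader.
	 */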
1285 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1286 if (r) {
1287 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1288 fw_name_dmcu);
1289 release_firmware(adev->dm.fw_dmcu);
1290 adev->dm.fw_dmcu = NULL;
1291 return r;
1292 }
1293
1294 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1295 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1296 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1297 adev->firmware.fw_size +=
1298 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1299
1300 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1301 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1302 adev->firmware.fw_size +=
1303 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1304
ee6e89c0
DF
1305 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1306
a94d5569
DF
1307 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1308
4562236b
HW
1309 return 0;
1310}
1311
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Record the firmware version before it is logged below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

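	/* Build a linear (identity) 16-point backlight transfer curve
	 * spanning 0..0xFFFF for the ABM IRAM tables.
	 */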
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

9340dfd3
HW
1617static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1618{
1619 struct smu_context *smu = &adev->smu;
1620 int ret = 0;
1621
1622 if (!is_support_sw_smu(adev))
1623 return 0;
1624
1625 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1626 * on the Windows driver dc implementation.
1627 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1628 * should be passed to smu during boot up and resume from s3.
1629 * boot up: dc calculates dcn watermark clock settings within dc_create,
1630 * dcn20_resource_construct
1631 * then calls the pplib functions below to pass the settings to smu:
1632 * smu_set_watermarks_for_clock_ranges
1633 * smu_set_watermarks_table
1634 * navi10_set_watermarks_table
1635 * smu_write_watermarks_table
1636 *
1637 * For Renoir, clock settings of dcn watermarks are also fixed values.
1638 * dc has implemented a different flow for the Windows driver:
1639 * dc_hardware_init / dc_set_power_state
1640 * dcn10_init_hw
1641 * notify_wm_ranges
1642 * set_wm_ranges
1643 * -- Linux
1644 * smu_set_watermarks_for_clock_ranges
1645 * renoir_set_watermarks_table
1646 * smu_write_watermarks_table
1647 *
1648 * For Linux,
1649 * dc_hardware_init -> amdgpu_dm_init
1650 * dc_set_power_state --> dm_resume
1651 *
1652 * therefore, this function applies to navi10/12/14 but not Renoir.
1653 *
1654 */
1655 switch (adev->asic_type) {
1656 case CHIP_NAVI10:
1657 case CHIP_NAVI14:
1658 case CHIP_NAVI12:
1659 break;
1660 default:
1661 return 0;
1662 }
1663
e7a95eea
EQ
1664 ret = smu_write_watermarks_table(smu);
1665 if (ret) {
1666 DRM_ERROR("Failed to update WMTABLE!\n");
1667 return ret;
9340dfd3
HW
1668 }
1669
9340dfd3
HW
1670 return 0;
1671}
1672
b8592b48
LL
1673/**
1674 * dm_hw_init() - Initialize DC device
28d687ea 1675 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
1676 *
1677 * Initialize the &struct amdgpu_display_manager device. This involves calling
1678 * the initializers of each DM component, then populating the struct with them.
1679 *
1680 * Although the function implies hardware initialization, both hardware and
1681 * software are initialized here. Splitting them out to their relevant init
1682 * hooks is a future TODO item.
1683 *
1684 * Some notable things that are initialized here:
1685 *
1686 * - Display Core, both software and hardware
1687 * - DC modules that we need (freesync and color management)
1688 * - DRM software states
1689 * - Interrupt sources and handlers
1690 * - Vblank support
1691 * - Debug FS entries, if enabled
1692 */
4562236b
HW
1693static int dm_hw_init(void *handle)
1694{
1695 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1696 /* Create DAL display manager */
1697 amdgpu_dm_init(adev);
4562236b
HW
1698 amdgpu_dm_hpd_init(adev);
1699
4562236b
HW
1700 return 0;
1701}
1702
b8592b48
LL
1703/**
1704 * dm_hw_fini() - Teardown DC device
28d687ea 1705 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
1706 *
1707 * Teardown components within &struct amdgpu_display_manager that require
1708 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1709 * were loaded. Also flush IRQ workqueues and disable them.
1710 */
4562236b
HW
1711static int dm_hw_fini(void *handle)
1712{
1713 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1714
1715 amdgpu_dm_hpd_fini(adev);
1716
1717 amdgpu_dm_irq_fini(adev);
21de3396 1718 amdgpu_dm_fini(adev);
4562236b
HW
1719 return 0;
1720}
1721
cdaae837
BL
1722
1723static int dm_enable_vblank(struct drm_crtc *crtc);
1724static void dm_disable_vblank(struct drm_crtc *crtc);
1725
1726static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1727 struct dc_state *state, bool enable)
1728{
1729 enum dc_irq_source irq_source;
1730 struct amdgpu_crtc *acrtc;
1731 int rc = -EBUSY;
1732 int i = 0;
1733
1734 for (i = 0; i < state->stream_count; i++) {
1735 acrtc = get_crtc_by_otg_inst(
1736 adev, state->stream_status[i].primary_otg_inst);
1737
1738 if (acrtc && state->stream_status[i].plane_count != 0) {
1739 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1740 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1741 DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1742 acrtc->crtc_id, enable ? "en" : "dis", rc);
1743 if (rc)
1744 DRM_WARN("Failed to %s pflip interrupts\n",
1745 enable ? "enable" : "disable");
1746
1747 if (enable) {
1748 rc = dm_enable_vblank(&acrtc->base);
1749 if (rc)
1750 DRM_WARN("Failed to enable vblank interrupts\n");
1751 } else {
1752 dm_disable_vblank(&acrtc->base);
1753 }
1754
1755 }
1756 }
1757
1758}
1759
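/* Commit a copy of the current DC state with all streams (and their
 * planes) removed, so that DC drives zero streams; used from the GPU
 * reset path in dm_suspend().
 */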
dfd84d90 1760static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
cdaae837
BL
1761{
1762 struct dc_state *context = NULL;
1763 enum dc_status res = DC_ERROR_UNEXPECTED;
1764 int i;
1765 struct dc_stream_state *del_streams[MAX_PIPES];
1766 int del_streams_count = 0;
1767
1768 memset(del_streams, 0, sizeof(del_streams));
1769
1770 context = dc_create_state(dc);
1771 if (context == NULL)
1772 goto context_alloc_fail;
1773
1774 dc_resource_state_copy_construct_current(dc, context);
1775
1776 /* First remove from context all streams */
1777 for (i = 0; i < context->stream_count; i++) {
1778 struct dc_stream_state *stream = context->streams[i];
1779
1780 del_streams[del_streams_count++] = stream;
1781 }
1782
1783 /* Remove all planes for removed streams and then remove the streams */
1784 for (i = 0; i < del_streams_count; i++) {
1785 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1786 res = DC_FAIL_DETACH_SURFACES;
1787 goto fail;
1788 }
1789
1790 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1791 if (res != DC_OK)
1792 goto fail;
1793 }
1794
1795
1796 res = dc_validate_global_state(dc, context, false);
1797
1798 if (res != DC_OK) {
1799 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1800 goto fail;
1801 }
1802
1803 res = dc_commit_state(dc, context);
1804
1805fail:
1806 dc_release_state(context);
1807
1808context_alloc_fail:
1809 return res;
1810}
1811
4562236b
HW
1812static int dm_suspend(void *handle)
1813{
1814 struct amdgpu_device *adev = handle;
1815 struct amdgpu_display_manager *dm = &adev->dm;
1816 int ret = 0;
4562236b 1817
53b3f8f4 1818 if (amdgpu_in_reset(adev)) {
cdaae837
BL
1819 mutex_lock(&dm->dc_lock);
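		/* Note: dc_lock is intentionally left held here; the matching
		 * mutex_unlock() is in the reset path of dm_resume(), after
		 * the cached DC state has been restored.
		 */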
1820 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1821
1822 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1823
1824 amdgpu_dm_commit_zero_streams(dm->dc);
1825
1826 amdgpu_dm_irq_suspend(adev);
1827
1828 return ret;
1829 }
4562236b 1830
d2f0b53b 1831 WARN_ON(adev->dm.cached_state);
4a580877 1832 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
d2f0b53b 1833
4a580877 1834 s3_handle_mst(adev_to_drm(adev), true);
4562236b 1835
4562236b
HW
1836 amdgpu_dm_irq_suspend(adev);
1837
a3621485 1838
32f5062d 1839 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
4562236b 1840
1c2075d4 1841 return 0;
4562236b
HW
1842}
1843
1daf8c63
AD
1844static struct amdgpu_dm_connector *
1845amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1846 struct drm_crtc *crtc)
4562236b
HW
1847{
1848 uint32_t i;
c2cea706 1849 struct drm_connector_state *new_con_state;
4562236b
HW
1850 struct drm_connector *connector;
1851 struct drm_crtc *crtc_from_state;
1852
c2cea706
LSL
1853 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1854 crtc_from_state = new_con_state->crtc;
4562236b
HW
1855
1856 if (crtc_from_state == crtc)
c84dec2f 1857 return to_amdgpu_dm_connector(connector);
4562236b
HW
1858 }
1859
1860 return NULL;
1861}
1862
fbbdadf2
BL
1863static void emulated_link_detect(struct dc_link *link)
1864{
1865 struct dc_sink_init_data sink_init_data = { 0 };
1866 struct display_sink_capability sink_caps = { 0 };
1867 enum dc_edid_status edid_status;
1868 struct dc_context *dc_ctx = link->ctx;
1869 struct dc_sink *sink = NULL;
1870 struct dc_sink *prev_sink = NULL;
1871
1872 link->type = dc_connection_none;
1873 prev_sink = link->local_sink;
1874
1875 if (prev_sink != NULL)
1876 dc_sink_retain(prev_sink);
1877
1878 switch (link->connector_signal) {
1879 case SIGNAL_TYPE_HDMI_TYPE_A: {
1880 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1881 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1882 break;
1883 }
1884
1885 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1886 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1887 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1888 break;
1889 }
1890
1891 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1892 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1893 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1894 break;
1895 }
1896
1897 case SIGNAL_TYPE_LVDS: {
1898 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1899 sink_caps.signal = SIGNAL_TYPE_LVDS;
1900 break;
1901 }
1902
1903 case SIGNAL_TYPE_EDP: {
1904 sink_caps.transaction_type =
1905 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1906 sink_caps.signal = SIGNAL_TYPE_EDP;
1907 break;
1908 }
1909
1910 case SIGNAL_TYPE_DISPLAY_PORT: {
1911 sink_caps.transaction_type =
1912 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1913 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1914 break;
1915 }
1916
1917 default:
1918 DC_ERROR("Invalid connector type! signal:%d\n",
1919 link->connector_signal);
1920 return;
1921 }
1922
1923 sink_init_data.link = link;
1924 sink_init_data.sink_signal = sink_caps.signal;
1925
1926 sink = dc_sink_create(&sink_init_data);
1927 if (!sink) {
1928 DC_ERROR("Failed to create sink!\n");
1929 return;
1930 }
1931
dcd5fb82 1932 /* dc_sink_create returns a new reference */
fbbdadf2
BL
1933 link->local_sink = sink;
1934
1935 edid_status = dm_helpers_read_local_edid(
1936 link->ctx,
1937 link,
1938 sink);
1939
1940 if (edid_status != EDID_OK)
1941 DC_ERROR("Failed to read EDID\n");
1942
1943}
1944
cdaae837
BL
1945static void dm_gpureset_commit_state(struct dc_state *dc_state,
1946 struct amdgpu_display_manager *dm)
1947{
1948 struct {
1949 struct dc_surface_update surface_updates[MAX_SURFACES];
1950 struct dc_plane_info plane_infos[MAX_SURFACES];
1951 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1952 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1953 struct dc_stream_update stream_update;
1954 } *bundle;
1955 int k, m;
1956
1957 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1958
1959 if (!bundle) {
1960 dm_error("Failed to allocate update bundle\n");
1961 goto cleanup;
1962 }
1963
1964 for (k = 0; k < dc_state->stream_count; k++) {
1965 bundle->stream_update.stream = dc_state->streams[k];
1966
1967 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1968 bundle->surface_updates[m].surface =
1969 dc_state->stream_status->plane_states[m];
1970 bundle->surface_updates[m].surface->force_full_update =
1971 true;
1972 }
1973 dc_commit_updates_for_stream(
1974 dm->dc, bundle->surface_updates,
1975 dc_state->stream_status->plane_count,
1976 dc_state->streams[k], &bundle->stream_update, dc_state);
1977 }
1978
1979cleanup:
1980 kfree(bundle);
1981
1982 return;
1983}
1984
3c4d55c9
AP
1985static void dm_set_dpms_off(struct dc_link *link)
1986{
1987 struct dc_stream_state *stream_state;
1988 struct amdgpu_dm_connector *aconnector = link->priv;
1989 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1990 struct dc_stream_update stream_update;
1991 bool dpms_off = true;
1992
1993 memset(&stream_update, 0, sizeof(stream_update));
1994 stream_update.dpms_off = &dpms_off;
1995
1996 mutex_lock(&adev->dm.dc_lock);
1997 stream_state = dc_stream_find_from_link(link);
1998
1999 if (stream_state == NULL) {
2000 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2001 mutex_unlock(&adev->dm.dc_lock);
2002 return;
2003 }
2004
2005 stream_update.stream = stream_state;
2006 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2007 stream_state, &stream_update,
2008 stream_state->ctx->dc->current_state);
2009 mutex_unlock(&adev->dm.dc_lock);
2010}
2011
4562236b
HW
2012static int dm_resume(void *handle)
2013{
2014 struct amdgpu_device *adev = handle;
4a580877 2015 struct drm_device *ddev = adev_to_drm(adev);
4562236b 2016 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 2017 struct amdgpu_dm_connector *aconnector;
4562236b 2018 struct drm_connector *connector;
f8d2d39e 2019 struct drm_connector_list_iter iter;
4562236b 2020 struct drm_crtc *crtc;
c2cea706 2021 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
2022 struct dm_crtc_state *dm_new_crtc_state;
2023 struct drm_plane *plane;
2024 struct drm_plane_state *new_plane_state;
2025 struct dm_plane_state *dm_new_plane_state;
113b7a01 2026 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 2027 enum dc_connection_type new_connection_type = dc_connection_none;
cdaae837
BL
2028 struct dc_state *dc_state;
2029 int i, r, j;
4562236b 2030
53b3f8f4 2031 if (amdgpu_in_reset(adev)) {
cdaae837
BL
2032 dc_state = dm->cached_dc_state;
2033
2034 r = dm_dmub_hw_init(adev);
2035 if (r)
2036 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2037
2038 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2039 dc_resume(dm->dc);
2040
2041 amdgpu_dm_irq_resume_early(adev);
2042
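		/* Mark every cached stream and plane as changed so the commit
		 * below reprograms the full display state after reset.
		 */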
2043 for (i = 0; i < dc_state->stream_count; i++) {
2044 dc_state->streams[i]->mode_changed = true;
2045 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2046 dc_state->stream_status->plane_states[j]->update_flags.raw
2047 = 0xffffffff;
2048 }
2049 }
2050
2051 WARN_ON(!dc_commit_state(dm->dc, dc_state));
4562236b 2052
cdaae837
BL
2053 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2054
2055 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2056
2057 dc_release_state(dm->cached_dc_state);
2058 dm->cached_dc_state = NULL;
2059
2060 amdgpu_dm_irq_resume_late(adev);
2061
2062 mutex_unlock(&dm->dc_lock);
2063
2064 return 0;
2065 }
113b7a01
LL
2066 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2067 dc_release_state(dm_state->context);
2068 dm_state->context = dc_create_state(dm->dc);
2069 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2070 dc_resource_state_construct(dm->dc, dm_state->context);
2071
8c7aea40
NK
2072 /* Before powering on DC we need to re-initialize DMUB. */
2073 r = dm_dmub_hw_init(adev);
2074 if (r)
2075 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2076
a80aa93d
ML
2077 /* power on hardware */
2078 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2079
4562236b
HW
2080 /* program HPD filter */
2081 dc_resume(dm->dc);
2082
4562236b
HW
2083 /*
2084 * early enable HPD Rx IRQ, should be done before set mode as short
2085 * pulse interrupts are used for MST
2086 */
2087 amdgpu_dm_irq_resume_early(adev);
2088
d20ebea8 2089 /* On resume we need to rewrite the MSTM control bits to enable MST */
684cd480
LP
2090 s3_handle_mst(ddev, false);
2091
4562236b 2092 /* Do detection */
f8d2d39e
LP
2093 drm_connector_list_iter_begin(ddev, &iter);
2094 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 2095 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2096
2097 /*
2098 * this is the case when traversing through already created
2099 * MST connectors, should be skipped
2100 */
2101 if (aconnector->mst_port)
2102 continue;
2103
03ea364c 2104 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
2105 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2106 DRM_ERROR("KMS: Failed to detect connector\n");
2107
2108 if (aconnector->base.force && new_connection_type == dc_connection_none)
2109 emulated_link_detect(aconnector->dc_link);
2110 else
2111 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
2112
2113 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2114 aconnector->fake_enable = false;
2115
dcd5fb82
MF
2116 if (aconnector->dc_sink)
2117 dc_sink_release(aconnector->dc_sink);
4562236b
HW
2118 aconnector->dc_sink = NULL;
2119 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 2120 mutex_unlock(&aconnector->hpd_lock);
4562236b 2121 }
f8d2d39e 2122 drm_connector_list_iter_end(&iter);
4562236b 2123
1f6010a9 2124 /* Force mode set in atomic commit */
a80aa93d 2125 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 2126 new_crtc_state->active_changed = true;
4f346e65 2127
fcb4019e
LSL
2128 /*
2129 * atomic_check is expected to create the dc states. We need to release
2130 * them here, since they were duplicated as part of the suspend
2131 * procedure.
2132 */
a80aa93d 2133 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
2134 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2135 if (dm_new_crtc_state->stream) {
2136 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2137 dc_stream_release(dm_new_crtc_state->stream);
2138 dm_new_crtc_state->stream = NULL;
2139 }
2140 }
2141
a80aa93d 2142 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
2143 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2144 if (dm_new_plane_state->dc_state) {
2145 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2146 dc_plane_state_release(dm_new_plane_state->dc_state);
2147 dm_new_plane_state->dc_state = NULL;
2148 }
2149 }
2150
2d1af6a1 2151 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 2152
a80aa93d 2153 dm->cached_state = NULL;
0a214e2f 2154
9faa4237 2155 amdgpu_dm_irq_resume_late(adev);
4562236b 2156
9340dfd3
HW
2157 amdgpu_dm_smu_write_watermarks_table(adev);
2158
2d1af6a1 2159 return 0;
4562236b
HW
2160}
2161
b8592b48
LL
2162/**
2163 * DOC: DM Lifecycle
2164 *
2165 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2166 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2167 * the base driver's device list to be initialized and torn down accordingly.
2168 *
2169 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2170 */
2171
4562236b
HW
2172static const struct amd_ip_funcs amdgpu_dm_funcs = {
2173 .name = "dm",
2174 .early_init = dm_early_init,
7abcf6b5 2175 .late_init = dm_late_init,
4562236b
HW
2176 .sw_init = dm_sw_init,
2177 .sw_fini = dm_sw_fini,
2178 .hw_init = dm_hw_init,
2179 .hw_fini = dm_hw_fini,
2180 .suspend = dm_suspend,
2181 .resume = dm_resume,
2182 .is_idle = dm_is_idle,
2183 .wait_for_idle = dm_wait_for_idle,
2184 .check_soft_reset = dm_check_soft_reset,
2185 .soft_reset = dm_soft_reset,
2186 .set_clockgating_state = dm_set_clockgating_state,
2187 .set_powergating_state = dm_set_powergating_state,
2188};
2189
2190const struct amdgpu_ip_block_version dm_ip_block =
2191{
2192 .type = AMD_IP_BLOCK_TYPE_DCE,
2193 .major = 1,
2194 .minor = 0,
2195 .rev = 0,
2196 .funcs = &amdgpu_dm_funcs,
2197};
2198
ca3268c4 2199
b8592b48
LL
2200/**
2201 * DOC: atomic
2202 *
2203 * *WIP*
2204 */
0a323b84 2205
b3663f70 2206static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 2207 .fb_create = amdgpu_display_user_framebuffer_create,
dfbbfe3c 2208 .get_format_info = amd_get_format_info,
366c1baa 2209 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 2210 .atomic_check = amdgpu_dm_atomic_check,
0269764a 2211 .atomic_commit = drm_atomic_helper_commit,
54f5499a
AG
2212};
2213
2214static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2215 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
2216};
2217
94562810
RS
2218static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2219{
2220 u32 max_cll, min_cll, max, min, q, r;
2221 struct amdgpu_dm_backlight_caps *caps;
2222 struct amdgpu_display_manager *dm;
2223 struct drm_connector *conn_base;
2224 struct amdgpu_device *adev;
ec11fe37 2225 struct dc_link *link = NULL;
94562810
RS
2226 static const u8 pre_computed_values[] = {
2227 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2228 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2229
2230 if (!aconnector || !aconnector->dc_link)
2231 return;
2232
ec11fe37 2233 link = aconnector->dc_link;
2234 if (link->connector_signal != SIGNAL_TYPE_EDP)
2235 return;
2236
94562810 2237 conn_base = &aconnector->base;
1348969a 2238 adev = drm_to_adev(conn_base->dev);
94562810
RS
2239 dm = &adev->dm;
2240 caps = &dm->backlight_caps;
2241 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2242 caps->aux_support = false;
2243 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2244 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2245
2246 if (caps->ext_caps->bits.oled == 1 ||
2247 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2248 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2249 caps->aux_support = true;
2250
2251 /* From the specification (CTA-861-G), the maximum luminance is
2252 * calculated as:
2253 * Luminance = 50*2**(CV/32)
2254 * where CV is a one-byte value.
2255 * Evaluating this expression would require floating point precision;
2256 * to avoid that complexity, we take advantage of the fact that CV is
2257 * divided by a constant. From Euclid's division algorithm, we know that
2258 * CV can be written as CV = 32*q + r. Substituting CV in the Luminance
2259 * expression gives 50*(2**q)*(2**(r/32)), so we only need to pre-compute
2260 * the value of 2**(r/32). The values were pre-computed with the
2261 * following Ruby line:
2262 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2263 * The results of the above expression can be verified against
2264 * pre_computed_values.
2265 */
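	/* Worked example (illustrative): max_cll = 65 gives q = 2 and r = 1,
	 * so max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204 nits,
	 * close to the exact 50 * 2**(65/32) ~= 204.4.
	 */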
2266 q = max_cll >> 5;
2267 r = max_cll % 32;
2268 max = (1 << q) * pre_computed_values[r];
2269
2270 // min luminance: maxLum * (CV/255)^2 / 100
2271 q = DIV_ROUND_CLOSEST(min_cll, 255);
2272 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2273
2274 caps->aux_max_input_signal = max;
2275 caps->aux_min_input_signal = min;
2276}
2277
97e51c16
HW
2278void amdgpu_dm_update_connector_after_detect(
2279 struct amdgpu_dm_connector *aconnector)
4562236b
HW
2280{
2281 struct drm_connector *connector = &aconnector->base;
2282 struct drm_device *dev = connector->dev;
b73a22d3 2283 struct dc_sink *sink;
4562236b
HW
2284
2285 /* MST handled by drm_mst framework */
2286 if (aconnector->mst_mgr.mst_state == true)
2287 return;
2288
4562236b 2289 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
2290 if (sink)
2291 dc_sink_retain(sink);
4562236b 2292
1f6010a9
DF
2293 /*
2294 * The EDID mgmt connector gets its first update only in the mode_valid hook;
4562236b 2295 * the connector sink is then set to either a fake or a physical sink depending on link status.
1f6010a9 2296 * Skip if this was already done during boot.
4562236b
HW
2297 */
2298 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2299 && aconnector->dc_em_sink) {
2300
1f6010a9
DF
2301 /*
2302 * For S3 resume with a headless setup, use the em_sink to fake a stream
2303 * because on resume connector->sink is set to NULL
4562236b
HW
2304 */
2305 mutex_lock(&dev->mode_config.mutex);
2306
2307 if (sink) {
922aa1e1 2308 if (aconnector->dc_sink) {
98e6436d 2309 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
2310 /*
2311 * The retain and release below are used to
2312 * bump up the refcount for the sink because the link no longer points
2313 * to it after disconnect; otherwise, on the next crtc-to-connector
922aa1e1
AG
2314 * reshuffle by UMD we would hit an unwanted dc_sink release
2315 */
dcd5fb82 2316 dc_sink_release(aconnector->dc_sink);
922aa1e1 2317 }
4562236b 2318 aconnector->dc_sink = sink;
dcd5fb82 2319 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
2320 amdgpu_dm_update_freesync_caps(connector,
2321 aconnector->edid);
4562236b 2322 } else {
98e6436d 2323 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 2324 if (!aconnector->dc_sink) {
4562236b 2325 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 2326 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 2327 }
4562236b
HW
2328 }
2329
2330 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2331
2332 if (sink)
2333 dc_sink_release(sink);
4562236b
HW
2334 return;
2335 }
2336
2337 /*
2338 * TODO: temporary guard to look for proper fix
2339 * if this sink is MST sink, we should not do anything
2340 */
dcd5fb82
MF
2341 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2342 dc_sink_release(sink);
4562236b 2343 return;
dcd5fb82 2344 }
4562236b
HW
2345
2346 if (aconnector->dc_sink == sink) {
1f6010a9
DF
2347 /*
2348 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2349 * Do nothing!!
2350 */
f1ad2f5e 2351 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 2352 aconnector->connector_id);
dcd5fb82
MF
2353 if (sink)
2354 dc_sink_release(sink);
4562236b
HW
2355 return;
2356 }
2357
f1ad2f5e 2358 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
2359 aconnector->connector_id, aconnector->dc_sink, sink);
2360
2361 mutex_lock(&dev->mode_config.mutex);
2362
1f6010a9
DF
2363 /*
2364 * 1. Update status of the drm connector
2365 * 2. Send an event and let userspace tell us what to do
2366 */
4562236b 2367 if (sink) {
1f6010a9
DF
2368 /*
2369 * TODO: check if we still need the S3 mode update workaround.
2370 * If yes, put it here.
2371 */
4562236b 2372 if (aconnector->dc_sink)
98e6436d 2373 amdgpu_dm_update_freesync_caps(connector, NULL);
4562236b
HW
2374
2375 aconnector->dc_sink = sink;
dcd5fb82 2376 dc_sink_retain(aconnector->dc_sink);
900b3cb1 2377 if (sink->dc_edid.length == 0) {
4562236b 2378 aconnector->edid = NULL;
e6142dd5
AP
2379 if (aconnector->dc_link->aux_mode) {
2380 drm_dp_cec_unset_edid(
2381 &aconnector->dm_dp_aux.aux);
2382 }
900b3cb1 2383 } else {
4562236b 2384 aconnector->edid =
e6142dd5 2385 (struct edid *)sink->dc_edid.raw_edid;
4562236b 2386
c555f023 2387 drm_connector_update_edid_property(connector,
e6142dd5 2388 aconnector->edid);
b24bdc37 2389 drm_add_edid_modes(connector, aconnector->edid);
e6142dd5
AP
2390
2391 if (aconnector->dc_link->aux_mode)
2392 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2393 aconnector->edid);
4562236b 2394 }
e6142dd5 2395
98e6436d 2396 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 2397 update_connector_ext_caps(aconnector);
4562236b 2398 } else {
e86e8947 2399 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 2400 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 2401 drm_connector_update_edid_property(connector, NULL);
4562236b 2402 aconnector->num_modes = 0;
dcd5fb82 2403 dc_sink_release(aconnector->dc_sink);
4562236b 2404 aconnector->dc_sink = NULL;
5326c452 2405 aconnector->edid = NULL;
0c8620d6
BL
2406#ifdef CONFIG_DRM_AMD_DC_HDCP
2407 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2408 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2409 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2410#endif
4562236b
HW
2411 }
2412
2413 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82 2414
0f877894
OV
2415 update_subconnector_property(aconnector);
2416
dcd5fb82
MF
2417 if (sink)
2418 dc_sink_release(sink);
4562236b
HW
2419}
2420
2421static void handle_hpd_irq(void *param)
2422{
c84dec2f 2423 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2424 struct drm_connector *connector = &aconnector->base;
2425 struct drm_device *dev = connector->dev;
fbbdadf2 2426 enum dc_connection_type new_connection_type = dc_connection_none;
0c8620d6 2427#ifdef CONFIG_DRM_AMD_DC_HDCP
1348969a 2428 struct amdgpu_device *adev = drm_to_adev(dev);
97f6c917 2429 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 2430#endif
4562236b 2431
1f6010a9
DF
2432 /*
2433 * In case of failure or MST there is no need to update the connector status
2434 * or notify the OS, since (in the MST case) MST does this in its own context.
4562236b
HW
2435 */
2436 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 2437
0c8620d6 2438#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 2439 if (adev->dm.hdcp_workqueue) {
96a3b32e 2440 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
97f6c917
BL
2441 dm_con_state->update_hdcp = true;
2442 }
0c8620d6 2443#endif
2e0ac3d6
HW
2444 if (aconnector->fake_enable)
2445 aconnector->fake_enable = false;
2446
fbbdadf2
BL
2447 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2448 DRM_ERROR("KMS: Failed to detect connector\n");
2449
2450 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2451 emulated_link_detect(aconnector->dc_link);
2452
2453
2454 drm_modeset_lock_all(dev);
2455 dm_restore_drm_connector_state(dev, connector);
2456 drm_modeset_unlock_all(dev);
2457
2458 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2459 drm_kms_helper_hotplug_event(dev);
2460
2461 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3c4d55c9
AP
2462 if (new_connection_type == dc_connection_none &&
2463 aconnector->dc_link->type == dc_connection_none)
2464 dm_set_dpms_off(aconnector->dc_link);
4562236b 2465
3c4d55c9 2466 amdgpu_dm_update_connector_after_detect(aconnector);
4562236b
HW
2467
2468 drm_modeset_lock_all(dev);
2469 dm_restore_drm_connector_state(dev, connector);
2470 drm_modeset_unlock_all(dev);
2471
2472 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2473 drm_kms_helper_hotplug_event(dev);
2474 }
2475 mutex_unlock(&aconnector->hpd_lock);
2476
2477}
2478
c84dec2f 2479static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
4562236b
HW
2480{
2481 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2482 uint8_t dret;
2483 bool new_irq_handled = false;
2484 int dpcd_addr;
2485 int dpcd_bytes_to_read;
2486
2487 const int max_process_count = 30;
2488 int process_count = 0;
2489
2490 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2491
2492 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2493 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2494 /* DPCD 0x200 - 0x201 for downstream IRQ */
2495 dpcd_addr = DP_SINK_COUNT;
2496 } else {
2497 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2498 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2499 dpcd_addr = DP_SINK_COUNT_ESI;
2500 }
2501
2502 dret = drm_dp_dpcd_read(
2503 &aconnector->dm_dp_aux.aux,
2504 dpcd_addr,
2505 esi,
2506 dpcd_bytes_to_read);
2507
2508 while (dret == dpcd_bytes_to_read &&
2509 process_count < max_process_count) {
2510 uint8_t retry;
2511 dret = 0;
2512
2513 process_count++;
2514
f1ad2f5e 2515 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
2516 /* handle HPD short pulse irq */
2517 if (aconnector->mst_mgr.mst_state)
2518 drm_dp_mst_hpd_irq(
2519 &aconnector->mst_mgr,
2520 esi,
2521 &new_irq_handled);
4562236b
HW
2522
2523 if (new_irq_handled) {
2524 /* ACK at DPCD to notify downstream */
2525 const int ack_dpcd_bytes_to_write =
2526 dpcd_bytes_to_read - 1;
2527
2528 for (retry = 0; retry < 3; retry++) {
2529 uint8_t wret;
2530
2531 wret = drm_dp_dpcd_write(
2532 &aconnector->dm_dp_aux.aux,
2533 dpcd_addr + 1,
2534 &esi[1],
2535 ack_dpcd_bytes_to_write);
2536 if (wret == ack_dpcd_bytes_to_write)
2537 break;
2538 }
2539
1f6010a9 2540 /* check if there is a new irq to be handled */
4562236b
HW
2541 dret = drm_dp_dpcd_read(
2542 &aconnector->dm_dp_aux.aux,
2543 dpcd_addr,
2544 esi,
2545 dpcd_bytes_to_read);
2546
2547 new_irq_handled = false;
d4a6e8a9 2548 } else {
4562236b 2549 break;
d4a6e8a9 2550 }
4562236b
HW
2551 }
2552
2553 if (process_count == max_process_count)
f1ad2f5e 2554 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
2555}
2556
2557static void handle_hpd_rx_irq(void *param)
2558{
c84dec2f 2559 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2560 struct drm_connector *connector = &aconnector->base;
2561 struct drm_device *dev = connector->dev;
53cbf65c 2562 struct dc_link *dc_link = aconnector->dc_link;
4562236b 2563 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
c8ea79a8 2564 bool result = false;
fbbdadf2 2565 enum dc_connection_type new_connection_type = dc_connection_none;
c8ea79a8 2566 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270 2567 union hpd_irq_data hpd_irq_data;
2a0f9270
BL
2568
2569 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
4562236b 2570
1f6010a9
DF
2571 /*
2572 * TODO: Temporarily add a mutex so that the hpd interrupt does not have a gpio
4562236b
HW
2573 * conflict; once the i2c helper is implemented, this mutex should be
2574 * retired.
2575 */
53cbf65c 2576 if (dc_link->type != dc_connection_mst_branch)
4562236b
HW
2577 mutex_lock(&aconnector->hpd_lock);
2578
3083a984
QZ
2579 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2580
2581 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2582 (dc_link->type == dc_connection_mst_branch)) {
2583 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2584 result = true;
2585 dm_handle_hpd_rx_irq(aconnector);
2586 goto out;
2587 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2588 result = false;
2589 dm_handle_hpd_rx_irq(aconnector);
2590 goto out;
2591 }
2592 }
2593
c8ea79a8 2594 mutex_lock(&adev->dm.dc_lock);
2a0f9270 2595#ifdef CONFIG_DRM_AMD_DC_HDCP
c8ea79a8 2596 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2a0f9270 2597#else
c8ea79a8 2598 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2a0f9270 2599#endif
c8ea79a8
QZ
2600 mutex_unlock(&adev->dm.dc_lock);
2601
3083a984 2602out:
c8ea79a8 2603 if (result && !is_mst_root_connector) {
4562236b 2604 /* Downstream Port status changed. */
fbbdadf2
BL
2605 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2606 DRM_ERROR("KMS: Failed to detect connector\n");
2607
2608 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2609 emulated_link_detect(dc_link);
2610
2611 if (aconnector->fake_enable)
2612 aconnector->fake_enable = false;
2613
2614 amdgpu_dm_update_connector_after_detect(aconnector);
2615
2616
2617 drm_modeset_lock_all(dev);
2618 dm_restore_drm_connector_state(dev, connector);
2619 drm_modeset_unlock_all(dev);
2620
2621 drm_kms_helper_hotplug_event(dev);
2622 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
2623
2624 if (aconnector->fake_enable)
2625 aconnector->fake_enable = false;
2626
4562236b
HW
2627 amdgpu_dm_update_connector_after_detect(aconnector);
2628
2629
2630 drm_modeset_lock_all(dev);
2631 dm_restore_drm_connector_state(dev, connector);
2632 drm_modeset_unlock_all(dev);
2633
2634 drm_kms_helper_hotplug_event(dev);
2635 }
2636 }
2a0f9270 2637#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
2638 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2639 if (adev->dm.hdcp_workqueue)
2640 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2641 }
2a0f9270 2642#endif
4562236b 2643
e86e8947
HV
2644 if (dc_link->type != dc_connection_mst_branch) {
2645 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
4562236b 2646 mutex_unlock(&aconnector->hpd_lock);
e86e8947 2647 }
4562236b
HW
2648}
2649
2650static void register_hpd_handlers(struct amdgpu_device *adev)
2651{
4a580877 2652 struct drm_device *dev = adev_to_drm(adev);
4562236b 2653 struct drm_connector *connector;
c84dec2f 2654 struct amdgpu_dm_connector *aconnector;
4562236b
HW
2655 const struct dc_link *dc_link;
2656 struct dc_interrupt_params int_params = {0};
2657
2658 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2659 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2660
2661 list_for_each_entry(connector,
2662 &dev->mode_config.connector_list, head) {
2663
c84dec2f 2664 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2665 dc_link = aconnector->dc_link;
2666
2667 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2668 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2669 int_params.irq_source = dc_link->irq_source_hpd;
2670
2671 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2672 handle_hpd_irq,
2673 (void *) aconnector);
2674 }
2675
2676 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2677
2678 /* Also register for DP short pulse (hpd_rx). */
2679 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2680 int_params.irq_source = dc_link->irq_source_hpd_rx;
2681
2682 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2683 handle_hpd_rx_irq,
2684 (void *) aconnector);
2685 }
2686 }
2687}
2688
55e56389
MR
2689#if defined(CONFIG_DRM_AMD_DC_SI)
2690/* Register IRQ sources and initialize IRQ callbacks */
2691static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2692{
2693 struct dc *dc = adev->dm.dc;
2694 struct common_irq_params *c_irq_params;
2695 struct dc_interrupt_params int_params = {0};
2696 int r;
2697 int i;
2698 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2699
2700 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2701 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2702
2703 /*
2704 * Actions of amdgpu_irq_add_id():
2705 * 1. Register a set() function with base driver.
2706 * Base driver will call set() function to enable/disable an
2707 * interrupt in DC hardware.
2708 * 2. Register amdgpu_dm_irq_handler().
2709 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2710 * coming from DC hardware.
2711 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2712 * for acknowledging and handling. */
2713
2714 /* Use VBLANK interrupt */
2715 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2716 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2717 if (r) {
2718 DRM_ERROR("Failed to add crtc irq id!\n");
2719 return r;
2720 }
2721
2722 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2723 int_params.irq_source =
2724 dc_interrupt_to_irq_source(dc, i + 1, 0);
2725
2726 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2727
2728 c_irq_params->adev = adev;
2729 c_irq_params->irq_src = int_params.irq_source;
2730
2731 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2732 dm_crtc_high_irq, c_irq_params);
2733 }
2734
2735 /* Use GRPH_PFLIP interrupt */
2736 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2737 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2738 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2739 if (r) {
2740 DRM_ERROR("Failed to add page flip irq id!\n");
2741 return r;
2742 }
2743
2744 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2745 int_params.irq_source =
2746 dc_interrupt_to_irq_source(dc, i, 0);
2747
2748 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2749
2750 c_irq_params->adev = adev;
2751 c_irq_params->irq_src = int_params.irq_source;
2752
2753 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2754 dm_pflip_high_irq, c_irq_params);
2755
2756 }
2757
2758 /* HPD */
2759 r = amdgpu_irq_add_id(adev, client_id,
2760 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2761 if (r) {
2762 DRM_ERROR("Failed to add hpd irq id!\n");
2763 return r;
2764 }
2765
2766 register_hpd_handlers(adev);
2767
2768 return 0;
2769}
2770#endif
2771
4562236b
HW
2772/* Register IRQ sources and initialize IRQ callbacks */
2773static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2774{
2775 struct dc *dc = adev->dm.dc;
2776 struct common_irq_params *c_irq_params;
2777 struct dc_interrupt_params int_params = {0};
2778 int r;
2779 int i;
1ffdeca6 2780 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 2781
84374725 2782 if (adev->asic_type >= CHIP_VEGA10)
3760f76c 2783 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
2784
2785 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2786 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2787
1f6010a9
DF
2788 /*
2789 * Actions of amdgpu_irq_add_id():
4562236b
HW
2790 * 1. Register a set() function with base driver.
2791 * Base driver will call set() function to enable/disable an
2792 * interrupt in DC hardware.
2793 * 2. Register amdgpu_dm_irq_handler().
2794 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2795 * coming from DC hardware.
2796 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2797 * for acknowledging and handling. */
2798
b57de80a 2799 /* Use VBLANK interrupt */
e9029155 2800 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 2801 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
2802 if (r) {
2803 DRM_ERROR("Failed to add crtc irq id!\n");
2804 return r;
2805 }
2806
2807 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2808 int_params.irq_source =
3d761e79 2809 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 2810
b57de80a 2811 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
2812
2813 c_irq_params->adev = adev;
2814 c_irq_params->irq_src = int_params.irq_source;
2815
2816 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2817 dm_crtc_high_irq, c_irq_params);
2818 }
2819
d2574c33
MK
2820 /* Use VUPDATE interrupt */
2821 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2822 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2823 if (r) {
2824 DRM_ERROR("Failed to add vupdate irq id!\n");
2825 return r;
2826 }
2827
2828 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2829 int_params.irq_source =
2830 dc_interrupt_to_irq_source(dc, i, 0);
2831
2832 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2833
2834 c_irq_params->adev = adev;
2835 c_irq_params->irq_src = int_params.irq_source;
2836
2837 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2838 dm_vupdate_high_irq, c_irq_params);
2839 }
2840
3d761e79 2841 /* Use GRPH_PFLIP interrupt */
4562236b
HW
2842 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2843 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 2844 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
2845 if (r) {
2846 DRM_ERROR("Failed to add page flip irq id!\n");
2847 return r;
2848 }
2849
2850 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2851 int_params.irq_source =
2852 dc_interrupt_to_irq_source(dc, i, 0);
2853
2854 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2855
2856 c_irq_params->adev = adev;
2857 c_irq_params->irq_src = int_params.irq_source;
2858
2859 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2860 dm_pflip_high_irq, c_irq_params);
2861
2862 }
2863
2864 /* HPD */
2c8ad2d5
AD
2865 r = amdgpu_irq_add_id(adev, client_id,
2866 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
2867 if (r) {
2868 DRM_ERROR("Failed to add hpd irq id!\n");
2869 return r;
2870 }
2871
2872 register_hpd_handlers(adev);
2873
2874 return 0;
2875}
2876
b86a1aa3 2877#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
2878/* Register IRQ sources and initialize IRQ callbacks */
2879static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2880{
2881 struct dc *dc = adev->dm.dc;
2882 struct common_irq_params *c_irq_params;
2883 struct dc_interrupt_params int_params = {0};
2884 int r;
2885 int i;
2886
2887 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2888 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2889
1f6010a9
DF
2890 /*
2891 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
2892 * 1. Register a set() function with base driver.
2893 * Base driver will call set() function to enable/disable an
2894 * interrupt in DC hardware.
2895 * 2. Register amdgpu_dm_irq_handler().
2896 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2897 * coming from DC hardware.
2898 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2899 * for acknowledging and handling.
1f6010a9 2900 */
ff5ef992
AD
2901
2902 /* Use VSTARTUP interrupt */
2903 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2904 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2905 i++) {
3760f76c 2906 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
2907
2908 if (r) {
2909 DRM_ERROR("Failed to add crtc irq id!\n");
2910 return r;
2911 }
2912
2913 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2914 int_params.irq_source =
2915 dc_interrupt_to_irq_source(dc, i, 0);
2916
2917 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2918
2919 c_irq_params->adev = adev;
2920 c_irq_params->irq_src = int_params.irq_source;
2921
2346ef47
NK
2922 amdgpu_dm_irq_register_interrupt(
2923 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2924 }
2925
2926 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2927 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2928 * to trigger at end of each vblank, regardless of state of the lock,
2929 * matching DCE behaviour.
2930 */
2931 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2932 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2933 i++) {
2934 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2935
2936 if (r) {
2937 DRM_ERROR("Failed to add vupdate irq id!\n");
2938 return r;
2939 }
2940
2941 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2942 int_params.irq_source =
2943 dc_interrupt_to_irq_source(dc, i, 0);
2944
2945 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2946
2947 c_irq_params->adev = adev;
2948 c_irq_params->irq_src = int_params.irq_source;
2949
ff5ef992 2950 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 2951 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
2952 }
2953
ff5ef992
AD
2954 /* Use GRPH_PFLIP interrupt */
2955 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2956 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2957 i++) {
3760f76c 2958 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
2959 if (r) {
2960 DRM_ERROR("Failed to add page flip irq id!\n");
2961 return r;
2962 }
2963
2964 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2965 int_params.irq_source =
2966 dc_interrupt_to_irq_source(dc, i, 0);
2967
2968 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2969
2970 c_irq_params->adev = adev;
2971 c_irq_params->irq_src = int_params.irq_source;
2972
2973 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2974 dm_pflip_high_irq, c_irq_params);
2975
2976 }
2977
2978 /* HPD */
3760f76c 2979 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
ff5ef992
AD
2980 &adev->hpd_irq);
2981 if (r) {
2982 DRM_ERROR("Failed to add hpd irq id!\n");
2983 return r;
2984 }
2985
2986 register_hpd_handlers(adev);
2987
2988 return 0;
2989}
2990#endif
2991
eb3dc897
NK
2992/*
2993 * Acquires the lock for the atomic state object and returns
2994 * the new atomic state.
2995 *
2996 * This should only be called during atomic check.
2997 */
2998static int dm_atomic_get_state(struct drm_atomic_state *state,
2999 struct dm_atomic_state **dm_state)
3000{
3001 struct drm_device *dev = state->dev;
1348969a 3002 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3003 struct amdgpu_display_manager *dm = &adev->dm;
3004 struct drm_private_state *priv_state;
eb3dc897
NK
3005
3006 if (*dm_state)
3007 return 0;
3008
eb3dc897
NK
3009 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3010 if (IS_ERR(priv_state))
3011 return PTR_ERR(priv_state);
3012
3013 *dm_state = to_dm_atomic_state(priv_state);
3014
3015 return 0;
3016}
3017
dfd84d90 3018static struct dm_atomic_state *
eb3dc897
NK
3019dm_atomic_get_new_state(struct drm_atomic_state *state)
3020{
3021 struct drm_device *dev = state->dev;
1348969a 3022 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3023 struct amdgpu_display_manager *dm = &adev->dm;
3024 struct drm_private_obj *obj;
3025 struct drm_private_state *new_obj_state;
3026 int i;
3027
3028 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3029 if (obj->funcs == dm->atomic_obj.funcs)
3030 return to_dm_atomic_state(new_obj_state);
3031 }
3032
3033 return NULL;
3034}
3035
eb3dc897
NK
3036static struct drm_private_state *
3037dm_atomic_duplicate_state(struct drm_private_obj *obj)
3038{
3039 struct dm_atomic_state *old_state, *new_state;
3040
3041 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3042 if (!new_state)
3043 return NULL;
3044
3045 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3046
813d20dc
AW
3047 old_state = to_dm_atomic_state(obj->state);
3048
3049 if (old_state && old_state->context)
3050 new_state->context = dc_copy_state(old_state->context);
3051
eb3dc897
NK
3052 if (!new_state->context) {
3053 kfree(new_state);
3054 return NULL;
3055 }
3056
eb3dc897
NK
3057 return &new_state->base;
3058}
3059
3060static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3061 struct drm_private_state *state)
3062{
3063 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3064
3065 if (dm_state && dm_state->context)
3066 dc_release_state(dm_state->context);
3067
3068 kfree(dm_state);
3069}
3070
3071static struct drm_private_state_funcs dm_atomic_state_funcs = {
3072 .atomic_duplicate_state = dm_atomic_duplicate_state,
3073 .atomic_destroy_state = dm_atomic_destroy_state,
3074};
3075
4562236b
HW
3076static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3077{
eb3dc897 3078 struct dm_atomic_state *state;
4562236b
HW
3079 int r;
3080
3081 adev->mode_info.mode_config_initialized = true;
3082
4a580877
LT
3083 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3084 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b 3085
4a580877
LT
3086 adev_to_drm(adev)->mode_config.max_width = 16384;
3087 adev_to_drm(adev)->mode_config.max_height = 16384;
4562236b 3088
4a580877
LT
3089 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3090 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
1f6010a9 3091 /* indicates support for immediate flip */
4a580877 3092 adev_to_drm(adev)->mode_config.async_page_flip = true;
4562236b 3093
4a580877 3094 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
4562236b 3095
eb3dc897
NK
3096 state = kzalloc(sizeof(*state), GFP_KERNEL);
3097 if (!state)
3098 return -ENOMEM;
3099
813d20dc 3100 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
3101 if (!state->context) {
3102 kfree(state);
3103 return -ENOMEM;
3104 }
3105
3106 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3107
4a580877 3108 drm_atomic_private_obj_init(adev_to_drm(adev),
8c1a765b 3109 &adev->dm.atomic_obj,
eb3dc897
NK
3110 &state->base,
3111 &dm_atomic_state_funcs);
3112
3dc9b1ce 3113 r = amdgpu_display_modeset_create_props(adev);
b67a468a
DL
3114 if (r) {
3115 dc_release_state(state->context);
3116 kfree(state);
4562236b 3117 return r;
b67a468a 3118 }
4562236b 3119
6ce8f316 3120 r = amdgpu_dm_audio_init(adev);
b67a468a
DL
3121 if (r) {
3122 dc_release_state(state->context);
3123 kfree(state);
6ce8f316 3124 return r;
b67a468a 3125 }
6ce8f316 3126
4562236b
HW
3127 return 0;
3128}
3129
206bbafe
DF
3130#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3131#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 3132#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 3133
4562236b
HW
3134#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3135 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3136
206bbafe
DF
3137static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3138{
3139#if defined(CONFIG_ACPI)
3140 struct amdgpu_dm_backlight_caps caps;
3141
58965855
FS
3142 memset(&caps, 0, sizeof(caps));
3143
206bbafe
DF
3144 if (dm->backlight_caps.caps_valid)
3145 return;
3146
3147 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3148 if (caps.caps_valid) {
94562810
RS
3149 dm->backlight_caps.caps_valid = true;
3150 if (caps.aux_support)
3151 return;
206bbafe
DF
3152 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3153 dm->backlight_caps.max_input_signal = caps.max_input_signal;
206bbafe
DF
3154 } else {
3155 dm->backlight_caps.min_input_signal =
3156 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3157 dm->backlight_caps.max_input_signal =
3158 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3159 }
3160#else
94562810
RS
3161 if (dm->backlight_caps.aux_support)
3162 return;
3163
8bcbc9ef
DF
3164 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3165 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
3166#endif
3167}
3168
94562810
RS
3169static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3170{
3171 bool rc;
3172
3173 if (!link)
3174 return 1;
3175
3176 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3177 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3178
3179 return rc ? 0 : 1;
3180}
3181
69d9f427
AM
3182static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3183 unsigned *min, unsigned *max)
94562810 3184{
94562810 3185 if (!caps)
69d9f427 3186 return 0;
94562810 3187
69d9f427
AM
3188 if (caps->aux_support) {
3189 // Firmware limits are in nits, DC API wants millinits.
3190 *max = 1000 * caps->aux_max_input_signal;
3191 *min = 1000 * caps->aux_min_input_signal;
94562810 3192 } else {
69d9f427
AM
3193 // Firmware limits are 8-bit, PWM control is 16-bit.
3194 *max = 0x101 * caps->max_input_signal;
3195 *min = 0x101 * caps->min_input_signal;
94562810 3196 }
69d9f427
AM
3197 return 1;
3198}
94562810 3199
69d9f427
AM
3200static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3201 uint32_t brightness)
3202{
3203 unsigned min, max;
94562810 3204
69d9f427
AM
3205 if (!get_brightness_range(caps, &min, &max))
3206 return brightness;
3207
3208 // Rescale 0..255 to min..max
3209 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3210 AMDGPU_MAX_BL_LEVEL);
3211}
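
/* Worked example for convert_brightness_from_user() above (illustrative,
 * assuming the PWM defaults min_input_signal = 12, max_input_signal = 255
 * and AMDGPU_MAX_BL_LEVEL = 255): min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535, so a user brightness of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 3084 + 31348 = 34432.
 */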
3212
3213static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3214 uint32_t brightness)
3215{
3216 unsigned min, max;
3217
3218 if (!get_brightness_range(caps, &min, &max))
3219 return brightness;
3220
3221 if (brightness < min)
3222 return 0;
3223 // Rescale min..max to 0..255
3224 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3225 max - min);
94562810
RS
3226}
3227
4562236b
HW
3228static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3229{
3230 struct amdgpu_display_manager *dm = bl_get_data(bd);
206bbafe 3231 struct amdgpu_dm_backlight_caps caps;
94562810
RS
3232 struct dc_link *link = NULL;
3233 u32 brightness;
3234 bool rc;
4562236b 3235
206bbafe
DF
3236 amdgpu_dm_update_backlight_caps(dm);
3237 caps = dm->backlight_caps;
94562810
RS
3238
3239 link = (struct dc_link *)dm->backlight_link;
3240
69d9f427 3241 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
94562810
RS
3242 // Change brightness based on AUX property
3243 if (caps.aux_support)
3244 return set_backlight_via_aux(link, brightness);
3245
3246 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3247
3248 return rc ? 0 : 1;
4562236b
HW
3249}
3250
3251static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3252{
620a0d27
DF
3253 struct amdgpu_display_manager *dm = bl_get_data(bd);
3254 int ret = dc_link_get_backlight_level(dm->backlight_link);
3255
3256 if (ret == DC_ERROR_UNEXPECTED)
3257 return bd->props.brightness;
69d9f427 3258 return convert_brightness_to_user(&dm->backlight_caps, ret);
4562236b
HW
3259}
3260
3261static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 3262 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
3263 .get_brightness = amdgpu_dm_backlight_get_brightness,
3264 .update_status = amdgpu_dm_backlight_update_status,
3265};
3266
7578ecda
AD
3267static void
3268amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
3269{
3270 char bl_name[16];
3271 struct backlight_properties props = { 0 };
3272
206bbafe
DF
3273 amdgpu_dm_update_backlight_caps(dm);
3274
4562236b 3275 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 3276 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
3277 props.type = BACKLIGHT_RAW;
3278
3279 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4a580877 3280 adev_to_drm(dm->adev)->primary->index);
4562236b
HW
3281
3282 dm->backlight_dev = backlight_device_register(bl_name,
4a580877
LT
3283 adev_to_drm(dm->adev)->dev,
3284 dm,
3285 &amdgpu_dm_backlight_ops,
3286 &props);
4562236b 3287
74baea42 3288 if (IS_ERR(dm->backlight_dev))
4562236b
HW
3289 DRM_ERROR("DM: Backlight registration failed!\n");
3290 else
f1ad2f5e 3291 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b
HW
3292}
3293
3294#endif
3295
df534fff 3296static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 3297 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
3298 enum drm_plane_type plane_type,
3299 const struct dc_plane_cap *plane_cap)
df534fff 3300{
f180b4bc 3301 struct drm_plane *plane;
df534fff
S
3302 unsigned long possible_crtcs;
3303 int ret = 0;
3304
f180b4bc 3305 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
3306 if (!plane) {
3307 DRM_ERROR("KMS: Failed to allocate plane\n");
3308 return -ENOMEM;
3309 }
b2fddb13 3310 plane->type = plane_type;
df534fff
S
3311
3312 /*
b2fddb13
NK
3313 * HACK: IGT tests expect that the primary plane for a CRTC
3314 * can only have one possible CRTC. Only expose support for
3315 * any CRTC if they're not going to be used as a primary plane
3316 * for a CRTC - like overlay or underlay planes.
df534fff
S
3317 */
3318 possible_crtcs = 1 << plane_id;
3319 if (plane_id >= dm->dc->caps.max_streams)
3320 possible_crtcs = 0xff;
3321
cc1fec57 3322 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
3323
3324 if (ret) {
3325 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 3326 kfree(plane);
df534fff
S
3327 return ret;
3328 }
3329
54087768
NK
3330 if (mode_info)
3331 mode_info->planes[plane_id] = plane;
3332
df534fff
S
3333 return ret;
3334}
3335
89fc8d4e
HW
3336
3337static void register_backlight_device(struct amdgpu_display_manager *dm,
3338 struct dc_link *link)
3339{
3340#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3341 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3342
3343 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3344 link->type != dc_connection_none) {
1f6010a9
DF
3345 /*
3346 * Even if registration fails, we should continue with
89fc8d4e
HW
3347 * DM initialization because not having a backlight control
3348 * is better than a black screen.
3349 */
3350 amdgpu_dm_register_backlight_device(dm);
3351
3352 if (dm->backlight_dev)
3353 dm->backlight_link = link;
3354 }
3355#endif
3356}
3357
3358
1f6010a9
DF
3359/*
3360 * In this architecture, the association
4562236b
HW
3361 * connector -> encoder -> crtc
3362 * is not really required. The crtc and connector will hold the
3363 * display_index as an abstraction to use with the DAL component.
3364 *
3365 * Returns 0 on success
3366 */
7578ecda 3367static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
3368{
3369 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 3370 int32_t i;
c84dec2f 3371 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 3372 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 3373 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 3374 uint32_t link_cnt;
cc1fec57 3375 int32_t primary_planes;
fbbdadf2 3376 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 3377 const struct dc_plane_cap *plane;
4562236b 3378
d58159de
AD
3379 dm->display_indexes_num = dm->dc->caps.max_streams;
3380 /* Update the actual number of crtcs used */
3381 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3382
4562236b 3383 link_cnt = dm->dc->caps.max_links;
4562236b
HW
3384 if (amdgpu_dm_mode_config_init(dm->adev)) {
3385 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 3386 return -EINVAL;
4562236b
HW
3387 }
3388
b2fddb13
NK
3389 /* There is one primary plane per CRTC */
3390 primary_planes = dm->dc->caps.max_streams;
54087768 3391 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 3392
b2fddb13
NK
3393 /*
3394 * Initialize primary planes, implicit planes for legacy IOCTLS.
3395 * Order is reversed to match iteration order in atomic check.
3396 */
3397 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
3398 plane = &dm->dc->caps.planes[i];
3399
b2fddb13 3400 if (initialize_plane(dm, mode_info, i,
cc1fec57 3401 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 3402 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 3403 goto fail;
d4e13b0d 3404 }
df534fff 3405 }
92f3ac40 3406
0d579c7e
NK
3407 /*
3408 * Initialize overlay planes, index starting after primary planes.
3409 * These planes have a higher DRM index than the primary planes since
3410 * they should be considered as having a higher z-order.
3411 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
3412 *
3413 * Only support DCN for now, and only expose one so we don't encourage
3414 * userspace to use up all the pipes.
0d579c7e 3415 */
cc1fec57
NK
3416 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3417 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3418
3419 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3420 continue;
3421
3422 if (!plane->blends_with_above || !plane->blends_with_below)
3423 continue;
3424
ea36ad34 3425 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
3426 continue;
3427
54087768 3428 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 3429 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 3430 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 3431 goto fail;
d4e13b0d 3432 }
cc1fec57
NK
3433
3434 /* Only create one overlay plane. */
3435 break;
d4e13b0d 3436 }
4562236b 3437
d4e13b0d 3438 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 3439 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 3440 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 3441 goto fail;
4562236b 3442 }
4562236b 3443
4562236b
HW
3444 /* loops over all connectors on the board */
3445 for (i = 0; i < link_cnt; i++) {
89fc8d4e 3446 struct dc_link *link = NULL;
4562236b
HW
3447
3448 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3449 DRM_ERROR(
3450 "KMS: Cannot support more than %d display indexes\n",
3451 AMDGPU_DM_MAX_DISPLAY_INDEX);
3452 continue;
3453 }
3454
3455 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3456 if (!aconnector)
cd8a2ae8 3457 goto fail;
4562236b
HW
3458
3459 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 3460 if (!aencoder)
cd8a2ae8 3461 goto fail;
4562236b
HW
3462
3463 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3464 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 3465 goto fail;
4562236b
HW
3466 }
3467
3468 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3469 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 3470 goto fail;
4562236b
HW
3471 }
3472
89fc8d4e
HW
3473 link = dc_get_link_at_index(dm->dc, i);
3474
fbbdadf2
BL
3475 if (!dc_link_detect_sink(link, &new_connection_type))
3476 DRM_ERROR("KMS: Failed to detect connector\n");
3477
3478 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3479 emulated_link_detect(link);
3480 amdgpu_dm_update_connector_after_detect(aconnector);
3481
3482 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 3483 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 3484 register_backlight_device(dm, link);
397a9bc5
RL
3485 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3486 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
3487 }
3488
3489
4562236b
HW
3490 }
3491
3492 /* Software is initialized. Now we can register interrupt handlers. */
3493 switch (adev->asic_type) {
55e56389
MR
3494#if defined(CONFIG_DRM_AMD_DC_SI)
3495 case CHIP_TAHITI:
3496 case CHIP_PITCAIRN:
3497 case CHIP_VERDE:
3498 case CHIP_OLAND:
3499 if (dce60_register_irq_handlers(dm->adev)) {
3500 DRM_ERROR("DM: Failed to initialize IRQ\n");
3501 goto fail;
3502 }
3503 break;
3504#endif
4562236b
HW
3505 case CHIP_BONAIRE:
3506 case CHIP_HAWAII:
cd4b356f
AD
3507 case CHIP_KAVERI:
3508 case CHIP_KABINI:
3509 case CHIP_MULLINS:
4562236b
HW
3510 case CHIP_TONGA:
3511 case CHIP_FIJI:
3512 case CHIP_CARRIZO:
3513 case CHIP_STONEY:
3514 case CHIP_POLARIS11:
3515 case CHIP_POLARIS10:
b264d345 3516 case CHIP_POLARIS12:
7737de91 3517 case CHIP_VEGAM:
2c8ad2d5 3518 case CHIP_VEGA10:
2325ff30 3519 case CHIP_VEGA12:
1fe6bf2f 3520 case CHIP_VEGA20:
4562236b
HW
3521 if (dce110_register_irq_handlers(dm->adev)) {
3522 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3523 goto fail;
4562236b
HW
3524 }
3525 break;
b86a1aa3 3526#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3527 case CHIP_RAVEN:
fbd2afe5 3528 case CHIP_NAVI12:
476e955d 3529 case CHIP_NAVI10:
fce651e3 3530 case CHIP_NAVI14:
30221ad8 3531 case CHIP_RENOIR:
79037324 3532 case CHIP_SIENNA_CICHLID:
a6c5308f 3533 case CHIP_NAVY_FLOUNDER:
2a411205 3534 case CHIP_DIMGREY_CAVEFISH:
469989ca 3535 case CHIP_VANGOGH:
ff5ef992
AD
3536 if (dcn10_register_irq_handlers(dm->adev)) {
3537 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3538 goto fail;
ff5ef992
AD
3539 }
3540 break;
3541#endif
4562236b 3542 default:
e63f8673 3543 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
cd8a2ae8 3544 goto fail;
4562236b
HW
3545 }
3546
4562236b 3547 return 0;
cd8a2ae8 3548fail:
4562236b 3549 kfree(aencoder);
4562236b 3550 kfree(aconnector);
54087768 3551
59d0f396 3552 return -EINVAL;
4562236b
HW
3553}
3554
7578ecda 3555static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b
HW
3556{
3557 drm_mode_config_cleanup(dm->ddev);
eb3dc897 3558 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
3559 return;
3560}
3561
3562/******************************************************************************
3563 * amdgpu_display_funcs functions
3564 *****************************************************************************/
3565
1f6010a9 3566/*
4562236b
HW
3567 * dm_bandwidth_update - program display watermarks
3568 *
3569 * @adev: amdgpu_device pointer
3570 *
3571 * Calculate and program the display watermarks and line buffer allocation.
3572 */
3573static void dm_bandwidth_update(struct amdgpu_device *adev)
3574{
49c07a99 3575 /* TODO: implement later */
4562236b
HW
3576}
3577
39cc5be2 3578static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
3579 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3580 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
3581 .backlight_set_level = NULL, /* never called for DC */
3582 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
3583 .hpd_sense = NULL,/* called unconditionally */
3584 .hpd_set_polarity = NULL, /* called unconditionally */
3585 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3586 .page_flip_get_scanoutpos =
3587 dm_crtc_get_scanoutpos,/* called unconditionally */
3588 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3589 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3590};
3591
3592#if defined(CONFIG_DEBUG_KERNEL_DC)
3593
3ee6b26b
AD
3594static ssize_t s3_debug_store(struct device *device,
3595 struct device_attribute *attr,
3596 const char *buf,
3597 size_t count)
4562236b
HW
3598{
3599 int ret;
3600 int s3_state;
ef1de361 3601 struct drm_device *drm_dev = dev_get_drvdata(device);
1348969a 3602 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4562236b
HW
3603
3604 ret = kstrtoint(buf, 0, &s3_state);
3605
3606 if (ret == 0) {
3607 if (s3_state) {
3608 dm_resume(adev);
4a580877 3609 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4562236b
HW
3610 } else
3611 dm_suspend(adev);
3612 }
3613
3614 return ret == 0 ? count : 0;
3615}
3616
3617DEVICE_ATTR_WO(s3_debug);
3618
3619#endif
3620
3621static int dm_early_init(void *handle)
3622{
3623 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3624
4562236b 3625 switch (adev->asic_type) {
55e56389
MR
3626#if defined(CONFIG_DRM_AMD_DC_SI)
3627 case CHIP_TAHITI:
3628 case CHIP_PITCAIRN:
3629 case CHIP_VERDE:
3630 adev->mode_info.num_crtc = 6;
3631 adev->mode_info.num_hpd = 6;
3632 adev->mode_info.num_dig = 6;
3633 break;
3634 case CHIP_OLAND:
3635 adev->mode_info.num_crtc = 2;
3636 adev->mode_info.num_hpd = 2;
3637 adev->mode_info.num_dig = 2;
3638 break;
3639#endif
4562236b
HW
3640 case CHIP_BONAIRE:
3641 case CHIP_HAWAII:
3642 adev->mode_info.num_crtc = 6;
3643 adev->mode_info.num_hpd = 6;
3644 adev->mode_info.num_dig = 6;
4562236b 3645 break;
cd4b356f
AD
3646 case CHIP_KAVERI:
3647 adev->mode_info.num_crtc = 4;
3648 adev->mode_info.num_hpd = 6;
3649 adev->mode_info.num_dig = 7;
cd4b356f
AD
3650 break;
3651 case CHIP_KABINI:
3652 case CHIP_MULLINS:
3653 adev->mode_info.num_crtc = 2;
3654 adev->mode_info.num_hpd = 6;
3655 adev->mode_info.num_dig = 6;
cd4b356f 3656 break;
4562236b
HW
3657 case CHIP_FIJI:
3658 case CHIP_TONGA:
3659 adev->mode_info.num_crtc = 6;
3660 adev->mode_info.num_hpd = 6;
3661 adev->mode_info.num_dig = 7;
4562236b
HW
3662 break;
3663 case CHIP_CARRIZO:
3664 adev->mode_info.num_crtc = 3;
3665 adev->mode_info.num_hpd = 6;
3666 adev->mode_info.num_dig = 9;
4562236b
HW
3667 break;
3668 case CHIP_STONEY:
3669 adev->mode_info.num_crtc = 2;
3670 adev->mode_info.num_hpd = 6;
3671 adev->mode_info.num_dig = 9;
4562236b
HW
3672 break;
3673 case CHIP_POLARIS11:
b264d345 3674 case CHIP_POLARIS12:
4562236b
HW
3675 adev->mode_info.num_crtc = 5;
3676 adev->mode_info.num_hpd = 5;
3677 adev->mode_info.num_dig = 5;
4562236b
HW
3678 break;
3679 case CHIP_POLARIS10:
7737de91 3680 case CHIP_VEGAM:
4562236b
HW
3681 adev->mode_info.num_crtc = 6;
3682 adev->mode_info.num_hpd = 6;
3683 adev->mode_info.num_dig = 6;
4562236b 3684 break;
2c8ad2d5 3685 case CHIP_VEGA10:
2325ff30 3686 case CHIP_VEGA12:
1fe6bf2f 3687 case CHIP_VEGA20:
2c8ad2d5
AD
3688 adev->mode_info.num_crtc = 6;
3689 adev->mode_info.num_hpd = 6;
3690 adev->mode_info.num_dig = 6;
3691 break;
b86a1aa3 3692#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3693 case CHIP_RAVEN:
20f2ffe5
AD
3694 case CHIP_RENOIR:
3695 case CHIP_VANGOGH:
ff5ef992
AD
3696 adev->mode_info.num_crtc = 4;
3697 adev->mode_info.num_hpd = 4;
3698 adev->mode_info.num_dig = 4;
ff5ef992 3699 break;
476e955d 3700 case CHIP_NAVI10:
fbd2afe5 3701 case CHIP_NAVI12:
79037324 3702 case CHIP_SIENNA_CICHLID:
a6c5308f 3703 case CHIP_NAVY_FLOUNDER:
476e955d
HW
3704 adev->mode_info.num_crtc = 6;
3705 adev->mode_info.num_hpd = 6;
3706 adev->mode_info.num_dig = 6;
3707 break;
fce651e3 3708 case CHIP_NAVI14:
2a411205 3709 case CHIP_DIMGREY_CAVEFISH:
fce651e3
BL
3710 adev->mode_info.num_crtc = 5;
3711 adev->mode_info.num_hpd = 5;
3712 adev->mode_info.num_dig = 5;
3713 break;
20f2ffe5 3714#endif
4562236b 3715 default:
e63f8673 3716 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4562236b
HW
3717 return -EINVAL;
3718 }
3719
c8dd5715
MD
3720 amdgpu_dm_set_irq_funcs(adev);
3721
39cc5be2
AD
3722 if (adev->mode_info.funcs == NULL)
3723 adev->mode_info.funcs = &dm_display_funcs;
3724
1f6010a9
DF
3725 /*
3726 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 3727 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
3728 * amdgpu_device_init()
3729 */
4562236b
HW
3730#if defined(CONFIG_DEBUG_KERNEL_DC)
3731 device_create_file(
4a580877 3732 adev_to_drm(adev)->dev,
4562236b
HW
3733 &dev_attr_s3_debug);
3734#endif
3735
3736 return 0;
3737}
3738
9b690ef3 3739static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
3740 struct dc_stream_state *new_stream,
3741 struct dc_stream_state *old_stream)
9b690ef3 3742{
2afda735 3743 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
3744}
3745
3746static bool modereset_required(struct drm_crtc_state *crtc_state)
3747{
2afda735 3748 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
3749}
3750
7578ecda 3751static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
3752{
3753 drm_encoder_cleanup(encoder);
3754 kfree(encoder);
3755}
3756
3757static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3758 .destroy = amdgpu_dm_encoder_destroy,
3759};
3760
e7b07cee 3761
695af5f9
NK
3762static int fill_dc_scaling_info(const struct drm_plane_state *state,
3763 struct dc_scaling_info *scaling_info)
e7b07cee 3764{
6491f0c0 3765 int scale_w, scale_h;
e7b07cee 3766
695af5f9 3767 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 3768
695af5f9
NK
3769 /* Source is fixed-point 16.16, but we ignore the fractional part for now. */
3770 scaling_info->src_rect.x = state->src_x >> 16;
3771 scaling_info->src_rect.y = state->src_y >> 16;
e7b07cee 3772
695af5f9
NK
3773 scaling_info->src_rect.width = state->src_w >> 16;
3774 if (scaling_info->src_rect.width == 0)
3775 return -EINVAL;
3776
3777 scaling_info->src_rect.height = state->src_h >> 16;
3778 if (scaling_info->src_rect.height == 0)
3779 return -EINVAL;
3780
3781 scaling_info->dst_rect.x = state->crtc_x;
3782 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
3783
3784 if (state->crtc_w == 0)
695af5f9 3785 return -EINVAL;
e7b07cee 3786
695af5f9 3787 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
3788
3789 if (state->crtc_h == 0)
695af5f9 3790 return -EINVAL;
e7b07cee 3791
695af5f9 3792 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 3793
695af5f9
NK
3794 /* DRM doesn't specify clipping on destination output. */
3795 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 3796
6491f0c0
NK
3797 /* TODO: Validate scaling per-format with DC plane caps */
3798 scale_w = scaling_info->dst_rect.width * 1000 /
3799 scaling_info->src_rect.width;
e7b07cee 3800
6491f0c0
NK
3801 if (scale_w < 250 || scale_w > 16000)
3802 return -EINVAL;
3803
3804 scale_h = scaling_info->dst_rect.height * 1000 /
3805 scaling_info->src_rect.height;
3806
3807 if (scale_h < 250 || scale_h > 16000)
3808 return -EINVAL;
3809
695af5f9
NK
3810 /*
3811 * The "scaling_quality" can be ignored for now; quality = 0 tells DC
3812 * to assume reasonable defaults based on the format.
3813 */
e7b07cee 3814
695af5f9 3815 return 0;
4562236b 3816}
695af5f9 3817
a3241991
BN
3818static void
3819fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3820 uint64_t tiling_flags)
e7b07cee 3821{
a3241991
BN
3822 /* Fill GFX8 params */
3823 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3824 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
707477b0 3825
a3241991
BN
3826 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3827 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3828 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3829 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3830 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
b830ebc9 3831
a3241991
BN
3832 /* XXX fix me for VI */
3833 tiling_info->gfx8.num_banks = num_banks;
3834 tiling_info->gfx8.array_mode =
3835 DC_ARRAY_2D_TILED_THIN1;
3836 tiling_info->gfx8.tile_split = tile_split;
3837 tiling_info->gfx8.bank_width = bankw;
3838 tiling_info->gfx8.bank_height = bankh;
3839 tiling_info->gfx8.tile_aspect = mtaspect;
3840 tiling_info->gfx8.tile_mode =
3841 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3842 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3843 == DC_ARRAY_1D_TILED_THIN1) {
3844 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
e7b07cee
HW
3845 }
3846
a3241991
BN
3847 tiling_info->gfx8.pipe_config =
3848 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
e7b07cee
HW
3849}
3850
a3241991
BN
3851static void
3852fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3853 union dc_tiling_info *tiling_info)
3854{
3855 tiling_info->gfx9.num_pipes =
3856 adev->gfx.config.gb_addr_config_fields.num_pipes;
3857 tiling_info->gfx9.num_banks =
3858 adev->gfx.config.gb_addr_config_fields.num_banks;
3859 tiling_info->gfx9.pipe_interleave =
3860 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3861 tiling_info->gfx9.num_shader_engines =
3862 adev->gfx.config.gb_addr_config_fields.num_se;
3863 tiling_info->gfx9.max_compressed_frags =
3864 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3865 tiling_info->gfx9.num_rb_per_se =
3866 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3867 tiling_info->gfx9.shaderEnable = 1;
a3241991
BN
3868 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3869 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3870 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3871 adev->asic_type == CHIP_VANGOGH)
3872 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
7df7e505
NK
3873}
3874
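/*
 * Summary of the checks below: cross-check the requested DCC parameters
 * against what DC reports via get_dcc_compression_cap(). Video formats, a
 * missing cap hook, an incapable surface, or an independent-64B-block
 * mismatch all fail with -EINVAL.
 */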
695af5f9 3875static int
a3241991
BN
3876validate_dcc(struct amdgpu_device *adev,
3877 const enum surface_pixel_format format,
3878 const enum dc_rotation_angle rotation,
3879 const union dc_tiling_info *tiling_info,
3880 const struct dc_plane_dcc_param *dcc,
3881 const struct dc_plane_address *address,
3882 const struct plane_size *plane_size)
7df7e505
NK
3883{
3884 struct dc *dc = adev->dm.dc;
8daa1218
NC
3885 struct dc_dcc_surface_param input;
3886 struct dc_surface_dcc_cap output;
7df7e505 3887
8daa1218
NC
3888 memset(&input, 0, sizeof(input));
3889 memset(&output, 0, sizeof(output));
3890
a3241991 3891 if (!dcc->enable)
87b7ebc2
RS
3892 return 0;
3893
a3241991
BN
3894 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3895 !dc->cap_funcs.get_dcc_compression_cap)
09e5665a 3896 return -EINVAL;
7df7e505 3897
695af5f9 3898 input.format = format;
12e2b2d4
DL
3899 input.surface_size.width = plane_size->surface_size.width;
3900 input.surface_size.height = plane_size->surface_size.height;
695af5f9 3901 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 3902
695af5f9 3903 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 3904 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 3905 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
3906 input.scan = SCAN_DIRECTION_VERTICAL;
3907
3908 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 3909 return -EINVAL;
7df7e505
NK
3910
3911 if (!output.capable)
09e5665a 3912 return -EINVAL;
7df7e505 3913
a3241991
BN
3914 if (dcc->independent_64b_blks == 0 &&
3915 output.grph.rgb.independent_64b_blks != 0)
09e5665a 3916 return -EINVAL;
7df7e505 3917
a3241991
BN
3918 return 0;
3919}
3920
37384b3f
BN
3921static bool
3922modifier_has_dcc(uint64_t modifier)
3923{
3924 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3925}
3926
3927static unsigned
3928modifier_gfx9_swizzle_mode(uint64_t modifier)
3929{
3930 if (modifier == DRM_FORMAT_MOD_LINEAR)
3931 return 0;
3932
3933 return AMD_FMT_MOD_GET(TILE, modifier);
3934}
3935
dfbbfe3c
BN
3936static const struct drm_format_info *
3937amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3938{
816853f9 3939 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
dfbbfe3c
BN
3940}
3941
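/*
 * The helper below starts from the device defaults and then overrides the
 * pipe/shader-engine (and bank or packer) counts with the values encoded in
 * an AMD format modifier, since the buffer layout is defined by the modifier
 * rather than by the scanout device's own configuration.
 */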
37384b3f
BN
3942static void
3943fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3944 union dc_tiling_info *tiling_info,
3945 uint64_t modifier)
3946{
3947 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3948 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3949 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3950 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3951
3952 fill_gfx9_tiling_info_from_device(adev, tiling_info);
3953
3954 if (!IS_AMD_FMT_MOD(modifier))
3955 return;
3956
3957 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3958 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3959
3960 if (adev->family >= AMDGPU_FAMILY_NV) {
3961 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3962 } else {
3963 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3964
3965 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3966 }
3967}
3968
faa37f54
BN
3969enum dm_micro_swizzle {
3970 MICRO_SWIZZLE_Z = 0,
3971 MICRO_SWIZZLE_S = 1,
3972 MICRO_SWIZZLE_D = 2,
3973 MICRO_SWIZZLE_R = 3
3974};
3975
3976static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3977 uint32_t format,
3978 uint64_t modifier)
3979{
3980 struct amdgpu_device *adev = drm_to_adev(plane->dev);
3981 const struct drm_format_info *info = drm_format_info(format);
3982
3983 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
3984
3985 if (!info)
3986 return false;
3987
3988 /*
3989 * We always have to allow this modifier, because core DRM still
3990 * checks LINEAR support if userspace does not provide modifiers.
3991 */
3992 if (modifier == DRM_FORMAT_MOD_LINEAR)
3993 return true;
3994
3995 /*
3996 * The arbitrary tiling support for multiplane formats has not been hooked
3997 * up.
3998 */
3999 if (info->num_planes > 1)
4000 return false;
4001
4002 /*
4003 * For D swizzle the canonical modifier depends on the bpp, so check
4004 * it here.
4005 */
4006 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4007 adev->family >= AMDGPU_FAMILY_NV) {
4008 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4009 return false;
4010 }
4011
4012 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4013 info->cpp[0] < 8)
4014 return false;
4015
4016 if (modifier_has_dcc(modifier)) {
4017 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4018 if (info->cpp[0] != 4)
4019 return false;
4020 }
4021
4022 return true;
4023}
4024
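/*
 * Summary of the helper below: append one modifier to a heap-grown array,
 * doubling the capacity as needed; on allocation failure the list is freed
 * and *mods is left NULL, which get_plane_modifiers() later reports as
 * -ENOMEM.
 */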
4025static void
4026add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4027{
4028 if (!*mods)
4029 return;
4030
4031 if (*cap - *size < 1) {
4032 uint64_t new_cap = *cap * 2;
4033 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4034
4035 if (!new_mods) {
4036 kfree(*mods);
4037 *mods = NULL;
4038 return;
4039 }
4040
4041 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4042 kfree(*mods);
4043 *mods = new_mods;
4044 *cap = new_cap;
4045 }
4046
4047 (*mods)[*size] = mod;
4048 *size += 1;
4049}
4050
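/*
 * The gfx9 (Vega/Raven) list below is ordered from most to least capable:
 * DCC variants (with and without constant encoding and retiling) come ahead
 * of the plain tiled layouts, ending with the standalone 64K tiles. This
 * ordering is an observation about the code, not documented policy.
 */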
4051static void
4052add_gfx9_modifiers(const struct amdgpu_device *adev,
4053 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4054{
4055 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4056 int pipe_xor_bits = min(8, pipes +
4057 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4058 int bank_xor_bits = min(8 - pipe_xor_bits,
4059 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4060 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4061 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4062
4063
4064 if (adev->family == AMDGPU_FAMILY_RV) {
4065 /* Raven2 and later */
4066 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4067
4068 /*
4069 * No _D DCC swizzles yet because we only allow 32bpp, which
4070 * doesn't support _D on DCN
4071 */
4072
4073 if (has_constant_encode) {
4074 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4075 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4076 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4077 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4078 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4079 AMD_FMT_MOD_SET(DCC, 1) |
4080 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4081 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4082 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4083 }
4084
4085 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4086 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4087 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4088 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4089 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4090 AMD_FMT_MOD_SET(DCC, 1) |
4091 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4092 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4093 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4094
4095 if (has_constant_encode) {
4096 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4097 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4098 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4099 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4100 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4101 AMD_FMT_MOD_SET(DCC, 1) |
4102 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4103 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4104 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4105
4106 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4107 AMD_FMT_MOD_SET(RB, rb) |
4108 AMD_FMT_MOD_SET(PIPE, pipes));
4109 }
4110
4111 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4112 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4113 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4114 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4115 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4116 AMD_FMT_MOD_SET(DCC, 1) |
4117 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4118 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4119 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4120 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4121 AMD_FMT_MOD_SET(RB, rb) |
4122 AMD_FMT_MOD_SET(PIPE, pipes));
4123 }
4124
4125 /*
4126 * Only supported for 64bpp on Raven, will be filtered on format in
4127 * dm_plane_format_mod_supported.
4128 */
4129 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4130 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4131 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4132 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4133 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4134
4135 if (adev->family == AMDGPU_FAMILY_RV) {
4136 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4137 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4138 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4139 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4140 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4141 }
4142
4143 /*
4144 * Only supported for 64bpp on Raven, will be filtered on format in
4145 * dm_plane_format_mod_supported.
4146 */
4147 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4148 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4149 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4150
4151 if (adev->family == AMDGPU_FAMILY_RV) {
4152 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4153 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4154 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4155 }
4156}
4157
4158static void
4159add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4160 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4161{
4162 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4163
4164 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4165 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4166 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4167 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4168 AMD_FMT_MOD_SET(DCC, 1) |
4169 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4170 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4171 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4172
4173 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4174 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4175 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4176 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4177 AMD_FMT_MOD_SET(DCC, 1) |
4178 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4179 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4180 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4181 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4182
4183 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4184 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4185 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4186 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4187
4188 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4189 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4190 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4191 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4192
4193
4194 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4195 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4196 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4197 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4198
4199 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4200 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4201 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4202}
4203
4204static void
4205add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4206 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4207{
4208 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4209 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4210
4211 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4212 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4213 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4214 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4215 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4216 AMD_FMT_MOD_SET(DCC, 1) |
4217 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4218 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4219 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4220 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4221
4222 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4223 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4224 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4225 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4226 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4227 AMD_FMT_MOD_SET(DCC, 1) |
4228 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4229 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4230 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4231 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4232 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4233
4234 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4235 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4236 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4237 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4238 AMD_FMT_MOD_SET(PACKERS, pkrs));
4239
4240 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4241 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4242 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4243 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4244 AMD_FMT_MOD_SET(PACKERS, pkrs));
4245
4246 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4247 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4248 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4249 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4250
4251 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4252 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4253 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4254}
4255
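/*
 * Summary of the function below: build the modifier list advertised to
 * userspace. Cursor planes only get LINEAR; other planes get the
 * family-specific tiled/DCC entries first, then DRM_FORMAT_MOD_LINEAR,
 * with DRM_FORMAT_MOD_INVALID terminating the list.
 */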
4256static int
4257get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4258{
4259 uint64_t size = 0, capacity = 128;
4260 *mods = NULL;
4261
4262 /* We have not hooked up any pre-GFX9 modifiers. */
4263 if (adev->family < AMDGPU_FAMILY_AI)
4264 return 0;
4265
4266 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4267
4268 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4269 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4270 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4271 return *mods ? 0 : -ENOMEM;
4272 }
4273
4274 switch (adev->family) {
4275 case AMDGPU_FAMILY_AI:
4276 case AMDGPU_FAMILY_RV:
4277 add_gfx9_modifiers(adev, mods, &size, &capacity);
4278 break;
4279 case AMDGPU_FAMILY_NV:
4280 case AMDGPU_FAMILY_VGH:
4281 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4282 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4283 else
4284 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4285 break;
4286 }
4287
4288 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4289
4290 /* INVALID marks the end of the list. */
4291 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4292
4293 if (!*mods)
4294 return -ENOMEM;
4295
4296 return 0;
4297}
4298
37384b3f
BN
4299static int
4300fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4301 const struct amdgpu_framebuffer *afb,
4302 const enum surface_pixel_format format,
4303 const enum dc_rotation_angle rotation,
4304 const struct plane_size *plane_size,
4305 union dc_tiling_info *tiling_info,
4306 struct dc_plane_dcc_param *dcc,
4307 struct dc_plane_address *address,
4308 const bool force_disable_dcc)
4309{
4310 const uint64_t modifier = afb->base.modifier;
4311 int ret;
4312
4313 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4314 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4315
4316 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4317 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4318
4319 dcc->enable = 1;
4320 dcc->meta_pitch = afb->base.pitches[1];
4321 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4322
4323 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4324 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4325 }
4326
4327 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4328 if (ret)
4329 return ret;
7df7e505 4330
09e5665a
NK
4331 return 0;
4332}
4333
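/*
 * Summary of the function below: translate a DRM framebuffer into DC's
 * tiling/size/DCC/address state. RGB surfaces use a single GRPH address;
 * semi-planar video formats get separate luma/chroma addresses with a
 * half-resolution chroma plane (see the TODO on deriving it per format).
 */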
4334static int
320932bf 4335fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 4336 const struct amdgpu_framebuffer *afb,
695af5f9
NK
4337 const enum surface_pixel_format format,
4338 const enum dc_rotation_angle rotation,
4339 const uint64_t tiling_flags,
09e5665a 4340 union dc_tiling_info *tiling_info,
12e2b2d4 4341 struct plane_size *plane_size,
09e5665a 4342 struct dc_plane_dcc_param *dcc,
87b7ebc2 4343 struct dc_plane_address *address,
5888f07a 4344 bool tmz_surface,
87b7ebc2 4345 bool force_disable_dcc)
09e5665a 4346{
320932bf 4347 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
4348 int ret;
4349
4350 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 4351 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 4352 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
4353 memset(address, 0, sizeof(*address));
4354
5888f07a
HW
4355 address->tmz_surface = tmz_surface;
4356
695af5f9 4357 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
be7b9b32
BN
4358 uint64_t addr = afb->address + fb->offsets[0];
4359
12e2b2d4
DL
4360 plane_size->surface_size.x = 0;
4361 plane_size->surface_size.y = 0;
4362 plane_size->surface_size.width = fb->width;
4363 plane_size->surface_size.height = fb->height;
4364 plane_size->surface_pitch =
320932bf
NK
4365 fb->pitches[0] / fb->format->cpp[0];
4366
e0634e8d 4367 address->type = PLN_ADDR_TYPE_GRAPHICS;
be7b9b32
BN
4368 address->grph.addr.low_part = lower_32_bits(addr);
4369 address->grph.addr.high_part = upper_32_bits(addr);
1894478a 4370 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
be7b9b32 4371 uint64_t luma_addr = afb->address + fb->offsets[0];
1791e54f 4372 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 4373
12e2b2d4
DL
4374 plane_size->surface_size.x = 0;
4375 plane_size->surface_size.y = 0;
4376 plane_size->surface_size.width = fb->width;
4377 plane_size->surface_size.height = fb->height;
4378 plane_size->surface_pitch =
320932bf
NK
4379 fb->pitches[0] / fb->format->cpp[0];
4380
12e2b2d4
DL
4381 plane_size->chroma_size.x = 0;
4382 plane_size->chroma_size.y = 0;
320932bf 4383 /* TODO: set these based on surface format */
12e2b2d4
DL
4384 plane_size->chroma_size.width = fb->width / 2;
4385 plane_size->chroma_size.height = fb->height / 2;
320932bf 4386
12e2b2d4 4387 plane_size->chroma_pitch =
320932bf
NK
4388 fb->pitches[1] / fb->format->cpp[1];
4389
e0634e8d
NK
4390 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4391 address->video_progressive.luma_addr.low_part =
be7b9b32 4392 lower_32_bits(luma_addr);
e0634e8d 4393 address->video_progressive.luma_addr.high_part =
be7b9b32 4394 upper_32_bits(luma_addr);
e0634e8d
NK
4395 address->video_progressive.chroma_addr.low_part =
4396 lower_32_bits(chroma_addr);
4397 address->video_progressive.chroma_addr.high_part =
4398 upper_32_bits(chroma_addr);
4399 }
09e5665a 4400
a3241991 4401 if (adev->family >= AMDGPU_FAMILY_AI) {
9a33e881
BN
4402 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4403 rotation, plane_size,
4404 tiling_info, dcc,
4405 address,
4406 force_disable_dcc);
09e5665a
NK
4407 if (ret)
4408 return ret;
a3241991
BN
4409 } else {
4410 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
09e5665a
NK
4411 }
4412
4413 return 0;
7df7e505
NK
4414}
4415
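/*
 * Summary of the helper below: map DRM blend state to DC. Per-pixel alpha
 * is honoured only for overlay planes using premultiplied alpha with an
 * alpha-capable format; a global alpha (8-bit, taken from the 16-bit plane
 * alpha property) applies whenever the plane is not fully opaque.
 */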
d74004b6 4416static void
695af5f9 4417fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
4418 bool *per_pixel_alpha, bool *global_alpha,
4419 int *global_alpha_value)
4420{
4421 *per_pixel_alpha = false;
4422 *global_alpha = false;
4423 *global_alpha_value = 0xff;
4424
4425 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4426 return;
4427
4428 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4429 static const uint32_t alpha_formats[] = {
4430 DRM_FORMAT_ARGB8888,
4431 DRM_FORMAT_RGBA8888,
4432 DRM_FORMAT_ABGR8888,
4433 };
4434 uint32_t format = plane_state->fb->format->format;
4435 unsigned int i;
4436
4437 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4438 if (format == alpha_formats[i]) {
4439 *per_pixel_alpha = true;
4440 break;
4441 }
4442 }
4443 }
4444
4445 if (plane_state->alpha < 0xffff) {
4446 *global_alpha = true;
4447 *global_alpha_value = plane_state->alpha >> 8;
4448 }
4449}
4450
004fefa3
NK
4451static int
4452fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 4453 const enum surface_pixel_format format,
004fefa3
NK
4454 enum dc_color_space *color_space)
4455{
4456 bool full_range;
4457
4458 *color_space = COLOR_SPACE_SRGB;
4459
4460 /* DRM color properties only affect non-RGB formats. */
695af5f9 4461 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
4462 return 0;
4463
4464 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4465
4466 switch (plane_state->color_encoding) {
4467 case DRM_COLOR_YCBCR_BT601:
4468 if (full_range)
4469 *color_space = COLOR_SPACE_YCBCR601;
4470 else
4471 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4472 break;
4473
4474 case DRM_COLOR_YCBCR_BT709:
4475 if (full_range)
4476 *color_space = COLOR_SPACE_YCBCR709;
4477 else
4478 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4479 break;
4480
4481 case DRM_COLOR_YCBCR_BT2020:
4482 if (full_range)
4483 *color_space = COLOR_SPACE_2020_YCBCR;
4484 else
4485 return -EINVAL;
4486 break;
4487
4488 default:
4489 return -EINVAL;
4490 }
4491
4492 return 0;
4493}
4494
695af5f9
NK
4495static int
4496fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4497 const struct drm_plane_state *plane_state,
4498 const uint64_t tiling_flags,
4499 struct dc_plane_info *plane_info,
87b7ebc2 4500 struct dc_plane_address *address,
5888f07a 4501 bool tmz_surface,
87b7ebc2 4502 bool force_disable_dcc)
695af5f9
NK
4503{
4504 const struct drm_framebuffer *fb = plane_state->fb;
4505 const struct amdgpu_framebuffer *afb =
4506 to_amdgpu_framebuffer(plane_state->fb);
4507 struct drm_format_name_buf format_name;
4508 int ret;
4509
4510 memset(plane_info, 0, sizeof(*plane_info));
4511
4512 switch (fb->format->format) {
4513 case DRM_FORMAT_C8:
4514 plane_info->format =
4515 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4516 break;
4517 case DRM_FORMAT_RGB565:
4518 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4519 break;
4520 case DRM_FORMAT_XRGB8888:
4521 case DRM_FORMAT_ARGB8888:
4522 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4523 break;
4524 case DRM_FORMAT_XRGB2101010:
4525 case DRM_FORMAT_ARGB2101010:
4526 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4527 break;
4528 case DRM_FORMAT_XBGR2101010:
4529 case DRM_FORMAT_ABGR2101010:
4530 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4531 break;
4532 case DRM_FORMAT_XBGR8888:
4533 case DRM_FORMAT_ABGR8888:
4534 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4535 break;
4536 case DRM_FORMAT_NV21:
4537 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4538 break;
4539 case DRM_FORMAT_NV12:
4540 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4541 break;
cbec6477
SW
4542 case DRM_FORMAT_P010:
4543 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4544 break;
492548dc
SW
4545 case DRM_FORMAT_XRGB16161616F:
4546 case DRM_FORMAT_ARGB16161616F:
4547 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4548 break;
2a5195dc
MK
4549 case DRM_FORMAT_XBGR16161616F:
4550 case DRM_FORMAT_ABGR16161616F:
4551 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4552 break;
695af5f9
NK
4553 default:
4554 DRM_ERROR(
4555 "Unsupported screen format %s\n",
4556 drm_get_format_name(fb->format->format, &format_name));
4557 return -EINVAL;
4558 }
4559
4560 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4561 case DRM_MODE_ROTATE_0:
4562 plane_info->rotation = ROTATION_ANGLE_0;
4563 break;
4564 case DRM_MODE_ROTATE_90:
4565 plane_info->rotation = ROTATION_ANGLE_90;
4566 break;
4567 case DRM_MODE_ROTATE_180:
4568 plane_info->rotation = ROTATION_ANGLE_180;
4569 break;
4570 case DRM_MODE_ROTATE_270:
4571 plane_info->rotation = ROTATION_ANGLE_270;
4572 break;
4573 default:
4574 plane_info->rotation = ROTATION_ANGLE_0;
4575 break;
4576 }
4577
4578 plane_info->visible = true;
4579 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4580
6d83a32d
MS
4581 plane_info->layer_index = 0;
4582
695af5f9
NK
4583 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4584 &plane_info->color_space);
4585 if (ret)
4586 return ret;
4587
4588 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4589 plane_info->rotation, tiling_flags,
4590 &plane_info->tiling_info,
4591 &plane_info->plane_size,
5888f07a 4592 &plane_info->dcc, address, tmz_surface,
87b7ebc2 4593 force_disable_dcc);
695af5f9
NK
4594 if (ret)
4595 return ret;
4596
4597 fill_blending_from_plane_state(
4598 plane_state, &plane_info->per_pixel_alpha,
4599 &plane_info->global_alpha, &plane_info->global_alpha_value);
4600
4601 return 0;
4602}
4603
4604static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4605 struct dc_plane_state *dc_plane_state,
4606 struct drm_plane_state *plane_state,
4607 struct drm_crtc_state *crtc_state)
e7b07cee 4608{
cf020d49 4609 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6eed95b0 4610 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
695af5f9
NK
4611 struct dc_scaling_info scaling_info;
4612 struct dc_plane_info plane_info;
695af5f9 4613 int ret;
87b7ebc2 4614 bool force_disable_dcc = false;
e7b07cee 4615
695af5f9
NK
4616 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4617 if (ret)
4618 return ret;
e7b07cee 4619
695af5f9
NK
4620 dc_plane_state->src_rect = scaling_info.src_rect;
4621 dc_plane_state->dst_rect = scaling_info.dst_rect;
4622 dc_plane_state->clip_rect = scaling_info.clip_rect;
4623 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 4624
87b7ebc2 4625 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
707477b0 4626 ret = fill_dc_plane_info_and_addr(adev, plane_state,
6eed95b0 4627 afb->tiling_flags,
695af5f9 4628 &plane_info,
87b7ebc2 4629 &dc_plane_state->address,
6eed95b0 4630 afb->tmz_surface,
87b7ebc2 4631 force_disable_dcc);
004fefa3
NK
4632 if (ret)
4633 return ret;
4634
695af5f9
NK
4635 dc_plane_state->format = plane_info.format;
4636 dc_plane_state->color_space = plane_info.color_space;
4638 dc_plane_state->plane_size = plane_info.plane_size;
4639 dc_plane_state->rotation = plane_info.rotation;
4640 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4641 dc_plane_state->stereo_format = plane_info.stereo_format;
4642 dc_plane_state->tiling_info = plane_info.tiling_info;
4643 dc_plane_state->visible = plane_info.visible;
4644 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4645 dc_plane_state->global_alpha = plane_info.global_alpha;
4646 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4647 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 4648 dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
695af5f9 4649
e277adc5
LSL
4650 /*
4651 * Always set input transfer function, since plane state is refreshed
4652 * every time.
4653 */
cf020d49
NK
4654 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4655 if (ret)
4656 return ret;
e7b07cee 4657
cf020d49 4658 return 0;
e7b07cee
HW
4659}
4660
3ee6b26b
AD
4661static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4662 const struct dm_connector_state *dm_state,
4663 struct dc_stream_state *stream)
e7b07cee
HW
4664{
4665 enum amdgpu_rmx_type rmx_type;
4666
4667 struct rect src = { 0 }; /* viewport in composition space */
4668 struct rect dst = { 0 }; /* stream addressable area */
4669
4670 /* no mode. nothing to be done */
4671 if (!mode)
4672 return;
4673
4674 /* Full screen scaling by default */
4675 src.width = mode->hdisplay;
4676 src.height = mode->vdisplay;
4677 dst.width = stream->timing.h_addressable;
4678 dst.height = stream->timing.v_addressable;
4679
f4791779
HW
4680 if (dm_state) {
4681 rmx_type = dm_state->scaling;
4682 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4683 if (src.width * dst.height <
4684 src.height * dst.width) {
4685 /* height needs less upscaling/more downscaling */
4686 dst.width = src.width *
4687 dst.height / src.height;
4688 } else {
4689 /* width needs less upscaling/more downscaling */
4690 dst.height = src.height *
4691 dst.width / src.width;
4692 }
4693 } else if (rmx_type == RMX_CENTER) {
4694 dst = src;
e7b07cee 4695 }
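		/*
		 * Worked example (illustrative): a 1920x1080 mode on a stream
		 * with a 1920x1200 addressable area under RMX_ASPECT keeps dst
		 * at 1920x1080, and the centering below then yields
		 * dst.y = (1200 - 1080) / 2 = 60.
		 */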
e7b07cee 4696
f4791779
HW
4697 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4698 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 4699
f4791779
HW
4700 if (dm_state->underscan_enable) {
4701 dst.x += dm_state->underscan_hborder / 2;
4702 dst.y += dm_state->underscan_vborder / 2;
4703 dst.width -= dm_state->underscan_hborder;
4704 dst.height -= dm_state->underscan_vborder;
4705 }
e7b07cee
HW
4706 }
4707
4708 stream->src = src;
4709 stream->dst = dst;
4710
f1ad2f5e 4711 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
e7b07cee
HW
4712 dst.x, dst.y, dst.width, dst.height);
4713
4714}
4715
3ee6b26b 4716static enum dc_color_depth
42ba01fc 4717convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 4718 bool is_y420, int requested_bpc)
e7b07cee 4719{
1bc22f20 4720 uint8_t bpc;
01c22997 4721
1bc22f20
SW
4722 if (is_y420) {
4723 bpc = 8;
4724
4725 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4726 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4727 bpc = 16;
4728 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4729 bpc = 12;
4730 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4731 bpc = 10;
4732 } else {
4733 bpc = (uint8_t)connector->display_info.bpc;
4734 /* Assume 8 bpc by default if no bpc is specified. */
4735 bpc = bpc ? bpc : 8;
4736 }
e7b07cee 4737
cbd14ae7 4738 if (requested_bpc > 0) {
01c22997
NK
4739 /*
4740 * Cap display bpc based on the user requested value.
4741 *
4742 * The value for state->max_bpc may not be correctly updated
4743 * depending on when the connector gets added to the state
4744 * or if this was called outside of atomic check, so it
4745 * can't be used directly.
4746 */
cbd14ae7 4747 bpc = min_t(u8, bpc, requested_bpc);
01c22997 4748
1825fd34
NK
4749 /* Round down to the nearest even number, e.g. a requested 11 bpc becomes 10. */
4750 bpc = bpc - (bpc & 1);
4751 }
07e3a1cf 4752
e7b07cee
HW
4753 switch (bpc) {
4754 case 0:
1f6010a9
DF
4755 /*
4756 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
4757 * EDID revisions before 1.4
4758 * TODO: Fix edid parsing
4759 */
4760 return COLOR_DEPTH_888;
4761 case 6:
4762 return COLOR_DEPTH_666;
4763 case 8:
4764 return COLOR_DEPTH_888;
4765 case 10:
4766 return COLOR_DEPTH_101010;
4767 case 12:
4768 return COLOR_DEPTH_121212;
4769 case 14:
4770 return COLOR_DEPTH_141414;
4771 case 16:
4772 return COLOR_DEPTH_161616;
4773 default:
4774 return COLOR_DEPTH_UNDEFINED;
4775 }
4776}
4777
3ee6b26b
AD
4778static enum dc_aspect_ratio
4779get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 4780{
e11d4147
LSL
4781 /* 1-1 mapping, since both enums follow the HDMI spec. */
4782 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
4783}
4784
3ee6b26b
AD
4785static enum dc_color_space
4786get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
4787{
4788 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4789
4790 switch (dc_crtc_timing->pixel_encoding) {
4791 case PIXEL_ENCODING_YCBCR422:
4792 case PIXEL_ENCODING_YCBCR444:
4793 case PIXEL_ENCODING_YCBCR420:
4794 {
4795 /*
4796 * 27030 kHz is the separation point between HDTV and SDTV
4797 * according to the HDMI spec; we use YCbCr709 and YCbCr601,
4798 * respectively.
4799 */
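	/*
	 * E.g. 1080p60 at 148.5 MHz (1485000 in 100 Hz units) sits well
	 * above this threshold and selects the YCbCr709 variants below.
	 */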
380604e2 4800 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
4801 if (dc_crtc_timing->flags.Y_ONLY)
4802 color_space =
4803 COLOR_SPACE_YCBCR709_LIMITED;
4804 else
4805 color_space = COLOR_SPACE_YCBCR709;
4806 } else {
4807 if (dc_crtc_timing->flags.Y_ONLY)
4808 color_space =
4809 COLOR_SPACE_YCBCR601_LIMITED;
4810 else
4811 color_space = COLOR_SPACE_YCBCR601;
4812 }
4813
4814 }
4815 break;
4816 case PIXEL_ENCODING_RGB:
4817 color_space = COLOR_SPACE_SRGB;
4818 break;
4819
4820 default:
4821 WARN_ON(1);
4822 break;
4823 }
4824
4825 return color_space;
4826}
4827
ea117312
TA
4828static bool adjust_colour_depth_from_display_info(
4829 struct dc_crtc_timing *timing_out,
4830 const struct drm_display_info *info)
400443e8 4831{
ea117312 4832 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 4833 int normalized_clk;
400443e8 4834 do {
380604e2 4835 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
4836 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4837 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4838 normalized_clk /= 2;
4839 /* Adjust the pixel clock per the HDMI spec for the colour depth; e.g. 10 bpc carries 30 bits per pixel vs 24, hence the 30/24 factor below. */
ea117312
TA
4840 switch (depth) {
4841 case COLOR_DEPTH_888:
4842 break;
400443e8
ML
4843 case COLOR_DEPTH_101010:
4844 normalized_clk = (normalized_clk * 30) / 24;
4845 break;
4846 case COLOR_DEPTH_121212:
4847 normalized_clk = (normalized_clk * 36) / 24;
4848 break;
4849 case COLOR_DEPTH_161616:
4850 normalized_clk = (normalized_clk * 48) / 24;
4851 break;
4852 default:
ea117312
TA
4853 /* The above depths are the only ones valid for HDMI. */
4854 return false;
400443e8 4855 }
ea117312
TA
4856 if (normalized_clk <= info->max_tmds_clock) {
4857 timing_out->display_color_depth = depth;
4858 return true;
4859 }
4860 } while (--depth > COLOR_DEPTH_666);
4861 return false;
400443e8 4862}
e7b07cee 4863
42ba01fc
NK
4864static void fill_stream_properties_from_drm_display_mode(
4865 struct dc_stream_state *stream,
4866 const struct drm_display_mode *mode_in,
4867 const struct drm_connector *connector,
4868 const struct drm_connector_state *connector_state,
cbd14ae7
SW
4869 const struct dc_stream_state *old_stream,
4870 int requested_bpc)
e7b07cee
HW
4871{
4872 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 4873 const struct drm_display_info *info = &connector->display_info;
d4252eee 4874 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
4875 struct hdmi_vendor_infoframe hv_frame;
4876 struct hdmi_avi_infoframe avi_frame;
e7b07cee 4877
acf83f86
WL
4878 memset(&hv_frame, 0, sizeof(hv_frame));
4879 memset(&avi_frame, 0, sizeof(avi_frame));
4880
e7b07cee
HW
4881 timing_out->h_border_left = 0;
4882 timing_out->h_border_right = 0;
4883 timing_out->v_border_top = 0;
4884 timing_out->v_border_bottom = 0;
4885 /* TODO: un-hardcode */
fe61a2f1 4886 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 4887 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 4888 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
4889 else if (drm_mode_is_420_also(info, mode_in)
4890 && aconnector->force_yuv420_output)
4891 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 4892 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 4893 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
4894 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4895 else
4896 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4897
4898 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4899 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
4900 connector,
4901 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4902 requested_bpc);
e7b07cee
HW
4903 timing_out->scan_type = SCANNING_TYPE_NODATA;
4904 timing_out->hdmi_vic = 0;
b333730d
BL
4905
4906 if (old_stream) {
4907 timing_out->vic = old_stream->timing.vic;
4908 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4909 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4910 } else {
4911 timing_out->vic = drm_match_cea_mode(mode_in);
4912 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4913 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4914 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4915 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4916 }
e7b07cee 4917
1cb1d477
WL
4918 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4919 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4920 timing_out->vic = avi_frame.video_code;
4921 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4922 timing_out->hdmi_vic = hv_frame.vic;
4923 }
4924
e7b07cee
HW
4925 timing_out->h_addressable = mode_in->crtc_hdisplay;
4926 timing_out->h_total = mode_in->crtc_htotal;
4927 timing_out->h_sync_width =
4928 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4929 timing_out->h_front_porch =
4930 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4931 timing_out->v_total = mode_in->crtc_vtotal;
4932 timing_out->v_addressable = mode_in->crtc_vdisplay;
4933 timing_out->v_front_porch =
4934 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4935 timing_out->v_sync_width =
4936 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
380604e2 4937 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
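/* crtc_clock is in kHz, so multiplying by 10 yields the 100 Hz units DC expects. */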
e7b07cee 4938 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
4939
4940 stream->output_color_space = get_output_color_space(timing_out);
4941
e43a432c
AK
4942 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4943 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
4944 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4945 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4946 drm_mode_is_420_also(info, mode_in) &&
4947 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4948 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4949 adjust_colour_depth_from_display_info(timing_out, info);
4950 }
4951 }
e7b07cee
HW
4952}
4953
3ee6b26b
AD
4954static void fill_audio_info(struct audio_info *audio_info,
4955 const struct drm_connector *drm_connector,
4956 const struct dc_sink *dc_sink)
e7b07cee
HW
4957{
4958 int i = 0;
4959 int cea_revision = 0;
4960 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4961
4962 audio_info->manufacture_id = edid_caps->manufacturer_id;
4963 audio_info->product_id = edid_caps->product_id;
4964
4965 cea_revision = drm_connector->display_info.cea_rev;
4966
090afc1e 4967 strscpy(audio_info->display_name,
d2b2562c 4968 edid_caps->display_name,
090afc1e 4969 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 4970
b830ebc9 4971 if (cea_revision >= 3) {
e7b07cee
HW
4972 audio_info->mode_count = edid_caps->audio_mode_count;
4973
4974 for (i = 0; i < audio_info->mode_count; ++i) {
4975 audio_info->modes[i].format_code =
4976 (enum audio_format_code)
4977 (edid_caps->audio_modes[i].format_code);
4978 audio_info->modes[i].channel_count =
4979 edid_caps->audio_modes[i].channel_count;
4980 audio_info->modes[i].sample_rates.all =
4981 edid_caps->audio_modes[i].sample_rate;
4982 audio_info->modes[i].sample_size =
4983 edid_caps->audio_modes[i].sample_size;
4984 }
4985 }
4986
4987 audio_info->flags.all = edid_caps->speaker_flags;
4988
4989 /* TODO: We only check for the progressive mode, check for interlace mode too */
b830ebc9 4990 if (drm_connector->latency_present[0]) {
e7b07cee
HW
4991 audio_info->video_latency = drm_connector->video_latency[0];
4992 audio_info->audio_latency = drm_connector->audio_latency[0];
4993 }
4994
4995 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4996
4997}
4998
3ee6b26b
AD
4999static void
5000copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5001 struct drm_display_mode *dst_mode)
e7b07cee
HW
5002{
5003 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5004 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5005 dst_mode->crtc_clock = src_mode->crtc_clock;
5006 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5007 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 5008 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
5009 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5010 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5011 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5012 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5013 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5014 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5015 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5016 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5017}
5018
3ee6b26b
AD
5019static void
5020decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5021 const struct drm_display_mode *native_mode,
5022 bool scale_enabled)
e7b07cee
HW
5023{
5024 if (scale_enabled) {
5025 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5026 } else if (native_mode->clock == drm_mode->clock &&
5027 native_mode->htotal == drm_mode->htotal &&
5028 native_mode->vtotal == drm_mode->vtotal) {
5029 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5030 } else {
5031 /* no scaling and no amdgpu-inserted mode: nothing to patch */
5032 }
5033}
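/*
 * In short: with scaling on, or when the requested mode already matches the
 * native timing, the native CRTC timing is copied over so the panel keeps
 * its fixed timing and any up/down-scaling is left to the hardware.
 */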
5034
aed15309
ML
5035static struct dc_sink *
5036create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 5037{
2e0ac3d6 5038 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 5039 struct dc_sink *sink = NULL;
2e0ac3d6
HW
5040 sink_init_data.link = aconnector->dc_link;
5041 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5042
5043 sink = dc_sink_create(&sink_init_data);
423788c7 5044 if (!sink) {
2e0ac3d6 5045 DRM_ERROR("Failed to create sink!\n");
aed15309 5046 return NULL;
423788c7 5047 }
2e0ac3d6 5048 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 5049
aed15309 5050 return sink;
2e0ac3d6
HW
5051}
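/*
 * The virtual sink created here lets stream creation proceed for connectors
 * that are forced on (or probed headless) and so have no real dc_sink.
 */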
5052
fa2123db
ML
5053static void set_multisync_trigger_params(
5054 struct dc_stream_state *stream)
5055{
5056 if (stream->triggered_crtc_reset.enabled) {
5057 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5058 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5059 }
5060}
5061
5062static void set_master_stream(struct dc_stream_state *stream_set[],
5063 int stream_count)
5064{
5065 int j, highest_rfr = 0, master_stream = 0;
5066
5067 for (j = 0; j < stream_count; j++) {
5068 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5069 int refresh_rate = 0;
5070
380604e2 5071 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
5072 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5073 if (refresh_rate > highest_rfr) {
5074 highest_rfr = refresh_rate;
5075 master_stream = j;
5076 }
5077 }
5078 }
5079 for (j = 0; j < stream_count; j++) {
03736f4c 5080 if (stream_set[j])
fa2123db
ML
5081 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5082 }
5083}
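/*
 * Worked example for the refresh-rate math above: CEA 1080p60 has
 * pix_clk_100hz = 1485000 (148.5 MHz), h_total = 2200 and v_total = 1125,
 * so (1485000 * 100) / (2200 * 1125) = 60 Hz; the stream with the highest
 * such rate becomes the crtc-reset event source for all the others.
 */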
5084
5085static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5086{
5087 int i = 0;
5088
5089 if (context->stream_count < 2)
5090 return;
5091 for (i = 0; i < context->stream_count ; i++) {
5092 if (!context->streams[i])
5093 continue;
1f6010a9
DF
5094 /*
5095 * TODO: add a function to read AMD VSDB bits and set
fa2123db 5096 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 5097 * For now it's set to false
fa2123db
ML
5098 */
5099 set_multisync_trigger_params(context->streams[i]);
5100 }
5101 set_master_stream(context->streams, context->stream_count);
5102}
5103
3ee6b26b
AD
5104static struct dc_stream_state *
5105create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5106 const struct drm_display_mode *drm_mode,
b333730d 5107 const struct dm_connector_state *dm_state,
cbd14ae7
SW
5108 const struct dc_stream_state *old_stream,
5109 int requested_bpc)
e7b07cee
HW
5110{
5111 struct drm_display_mode *preferred_mode = NULL;
391ef035 5112 struct drm_connector *drm_connector;
42ba01fc
NK
5113 const struct drm_connector_state *con_state =
5114 dm_state ? &dm_state->base : NULL;
0971c40e 5115 struct dc_stream_state *stream = NULL;
e7b07cee
HW
5116 struct drm_display_mode mode = *drm_mode;
5117 bool native_mode_found = false;
b333730d
BL
5118 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5119 int mode_refresh;
58124bf8 5120 int preferred_refresh = 0;
defeb878 5121#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015
DF
5122 struct dsc_dec_dpcd_caps dsc_caps;
5123 uint32_t link_bandwidth_kbps;
7c431455 5124#endif
aed15309 5125 struct dc_sink *sink = NULL;
b830ebc9 5126 if (aconnector == NULL) {
e7b07cee 5127 DRM_ERROR("aconnector is NULL!\n");
64245fa7 5128 return stream;
e7b07cee
HW
5129 }
5130
e7b07cee 5131 drm_connector = &aconnector->base;
2e0ac3d6 5132
f4ac176e 5133 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
5134 sink = create_fake_sink(aconnector);
5135 if (!sink)
5136 return stream;
aed15309
ML
5137 } else {
5138 sink = aconnector->dc_sink;
dcd5fb82 5139 dc_sink_retain(sink);
f4ac176e 5140 }
2e0ac3d6 5141
aed15309 5142 stream = dc_create_stream_for_sink(sink);
4562236b 5143
b830ebc9 5144 if (stream == NULL) {
e7b07cee 5145 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 5146 goto finish;
e7b07cee
HW
5147 }
5148
ceb3dbb4
JL
5149 stream->dm_stream_context = aconnector;
5150
4a36fcba
WL
5151 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5152 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5153
e7b07cee
HW
5154 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5155 /* Search for preferred mode */
5156 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5157 native_mode_found = true;
5158 break;
5159 }
5160 }
5161 if (!native_mode_found)
5162 preferred_mode = list_first_entry_or_null(
5163 &aconnector->base.modes,
5164 struct drm_display_mode,
5165 head);
5166
b333730d
BL
5167 mode_refresh = drm_mode_vrefresh(&mode);
5168
b830ebc9 5169 if (preferred_mode == NULL) {
1f6010a9
DF
5170 /*
5171 * This may not be an error, the use case is when we have no
e7b07cee
HW
5172 * usermode calls to reset and set the mode upon hotplug. In that
5173 * case, we call set mode ourselves to restore the previous mode,
5174 * and the mode list may not be filled in in time.
5175 */
f1ad2f5e 5176 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee
HW
5177 } else {
5178 decide_crtc_timing_for_drm_display_mode(
5179 &mode, preferred_mode,
f4791779 5180 dm_state ? (dm_state->scaling != RMX_OFF) : false);
58124bf8 5181 preferred_refresh = drm_mode_vrefresh(preferred_mode);
e7b07cee
HW
5182 }
5183
f783577c
JFZ
5184 if (!dm_state)
5185 drm_mode_set_crtcinfo(&mode, 0);
5186
b333730d
BL
5187 /*
5188 * If scaling is enabled and refresh rate didn't change
5189 * we copy the vic and polarities of the old timings
5190 */
5191 if (!scale || mode_refresh != preferred_refresh)
5192 fill_stream_properties_from_drm_display_mode(stream,
cbd14ae7 5193 &mode, &aconnector->base, con_state, NULL, requested_bpc);
b333730d
BL
5194 else
5195 fill_stream_properties_from_drm_display_mode(stream,
cbd14ae7 5196 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
b333730d 5197
df2f1015
DF
5198 stream->timing.flags.DSC = 0;
5199
5200 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
defeb878 5201#if defined(CONFIG_DRM_AMD_DC_DCN)
2af0f378
NC
5202 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5203 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6d824ed5 5204 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
df2f1015
DF
5205 &dsc_caps);
5206 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5207 dc_link_get_link_cap(aconnector->dc_link));
5208
0749ddeb 5209 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
bcc6aa61 5210 /* Set DSC policy according to dsc_clock_en */
0749ddeb
EB
5211 dc_dsc_policy_set_enable_dsc_when_not_needed(
5212 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
bcc6aa61 5213
0417df16 5214 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
df2f1015 5215 &dsc_caps,
0417df16 5216 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
9abdf392 5217 0,
df2f1015
DF
5218 link_bandwidth_kbps,
5219 &stream->timing,
5220 &stream->timing.dsc_cfg))
5221 stream->timing.flags.DSC = 1;
27e84dd7 5222 /* Overwrite the stream flag if DSC is enabled through debugfs */
0749ddeb 5223 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
097e6d98 5224 stream->timing.flags.DSC = 1;
734e4c97 5225
28b2f656
EB
5226 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5227 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
734e4c97 5228
28b2f656
EB
5229 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5230 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5268bf13
EB
5231
5232 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5233 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
097e6d98 5234 }
39a4eb85 5235#endif
df2f1015 5236 }
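/*
 * Summary of the DSC path above: dc_dsc_compute_config() enables DSC only
 * when the timing needs it to fit link_bandwidth_kbps, the debugfs
 * force-enable overrides that verdict, and any debugfs slice-count or
 * bits-per-pixel settings then replace the computed values.
 */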
39a4eb85 5237
e7b07cee
HW
5238 update_stream_scaling_settings(&mode, dm_state, stream);
5239
5240 fill_audio_info(
5241 &stream->audio_info,
5242 drm_connector,
aed15309 5243 sink);
e7b07cee 5244
ceb3dbb4 5245 update_stream_signal(stream, sink);
9182b4cb 5246
d832fc3b 5247 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
5248 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5249
8a488f5d
RL
5250 if (stream->link->psr_settings.psr_feature_enabled) {
5251 /*
5252 * Decide whether the stream supports VSC SDP colorimetry
5253 * before building the VSC info packet.
5254 */
5255 stream->use_vsc_sdp_for_colorimetry = false;
5256 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5257 stream->use_vsc_sdp_for_colorimetry =
5258 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5259 } else {
5260 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5261 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 5262 }
8a488f5d 5263 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
8c322309 5264 }
aed15309 5265finish:
dcd5fb82 5266 dc_sink_release(sink);
9e3efe3e 5267
e7b07cee
HW
5268 return stream;
5269}
5270
7578ecda 5271static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
5272{
5273 drm_crtc_cleanup(crtc);
5274 kfree(crtc);
5275}
5276
5277static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 5278 struct drm_crtc_state *state)
e7b07cee
HW
5279{
5280 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5281
5282 /* TODO: Destroy dc_stream objects once the stream object is flattened */
5283 if (cur->stream)
5284 dc_stream_release(cur->stream);
5285
5286
5287 __drm_atomic_helper_crtc_destroy_state(state);
5288
5289
5290 kfree(state);
5291}
5292
5293static void dm_crtc_reset_state(struct drm_crtc *crtc)
5294{
5295 struct dm_crtc_state *state;
5296
5297 if (crtc->state)
5298 dm_crtc_destroy_state(crtc, crtc->state);
5299
5300 state = kzalloc(sizeof(*state), GFP_KERNEL);
5301 if (WARN_ON(!state))
5302 return;
5303
1f8a52ec 5304 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
5305}
5306
5307static struct drm_crtc_state *
5308dm_crtc_duplicate_state(struct drm_crtc *crtc)
5309{
5310 struct dm_crtc_state *state, *cur;
5311
5312 cur = to_dm_crtc_state(crtc->state);
5313
5314 if (WARN_ON(!crtc->state))
5315 return NULL;
5316
2004f45e 5317 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
5318 if (!state)
5319 return NULL;
e7b07cee
HW
5320
5321 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5322
5323 if (cur->stream) {
5324 state->stream = cur->stream;
5325 dc_stream_retain(state->stream);
5326 }
5327
d6ef9b41 5328 state->active_planes = cur->active_planes;
98e6436d 5329 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 5330 state->abm_level = cur->abm_level;
bb47de73
NK
5331 state->vrr_supported = cur->vrr_supported;
5332 state->freesync_config = cur->freesync_config;
14b25846 5333 state->crc_src = cur->crc_src;
cf020d49
NK
5334 state->cm_has_degamma = cur->cm_has_degamma;
5335 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
c920888c
WL
5336#ifdef CONFIG_DEBUG_FS
5337 state->crc_window = cur->crc_window;
5338#endif
e7b07cee
HW
5339 /* TODO: Duplicate dc_stream after the stream object is flattened */
5340
5341 return &state->base;
5342}
5343
c920888c 5344#ifdef CONFIG_DEBUG_FS
8ccbfdf0 5345static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
c920888c
WL
5346 struct drm_crtc_state *crtc_state,
5347 struct drm_property *property,
5348 uint64_t val)
5349{
5350 struct drm_device *dev = crtc->dev;
5351 struct amdgpu_device *adev = drm_to_adev(dev);
5352 struct dm_crtc_state *dm_new_state =
5353 to_dm_crtc_state(crtc_state);
5354
5355 if (property == adev->dm.crc_win_x_start_property)
5356 dm_new_state->crc_window.x_start = val;
5357 else if (property == adev->dm.crc_win_y_start_property)
5358 dm_new_state->crc_window.y_start = val;
5359 else if (property == adev->dm.crc_win_x_end_property)
5360 dm_new_state->crc_window.x_end = val;
5361 else if (property == adev->dm.crc_win_y_end_property)
5362 dm_new_state->crc_window.y_end = val;
5363 else
5364 return -EINVAL;
5365
5366 return 0;
5367}
5368
8ccbfdf0 5369static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
c920888c
WL
5370 const struct drm_crtc_state *state,
5371 struct drm_property *property,
5372 uint64_t *val)
5373{
5374 struct drm_device *dev = crtc->dev;
5375 struct amdgpu_device *adev = drm_to_adev(dev);
5376 struct dm_crtc_state *dm_state =
5377 to_dm_crtc_state(state);
5378
5379 if (property == adev->dm.crc_win_x_start_property)
5380 *val = dm_state->crc_window.x_start;
5381 else if (property == adev->dm.crc_win_y_start_property)
5382 *val = dm_state->crc_window.y_start;
5383 else if (property == adev->dm.crc_win_x_end_property)
5384 *val = dm_state->crc_window.x_end;
5385 else if (property == adev->dm.crc_win_y_end_property)
5386 *val = dm_state->crc_window.y_end;
5387 else
5388 return -EINVAL;
5389
5390 return 0;
5391}
5392#endif
5393
d2574c33
MK
5394static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5395{
5396 enum dc_irq_source irq_source;
5397 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 5398 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
5399 int rc;
5400
5401 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5402
5403 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5404
5405 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5406 acrtc->crtc_id, enable ? "en" : "dis", rc);
5407 return rc;
5408}
589d2739
HW
5409
5410static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5411{
5412 enum dc_irq_source irq_source;
5413 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 5414 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
5415 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5416 int rc = 0;
5417
5418 if (enable) {
5419 /* vblank irq on -> Only need vupdate irq in vrr mode */
5420 if (amdgpu_dm_vrr_active(acrtc_state))
5421 rc = dm_set_vupdate_irq(crtc, true);
5422 } else {
5423 /* vblank irq off -> vupdate irq off */
5424 rc = dm_set_vupdate_irq(crtc, false);
5425 }
5426
5427 if (rc)
5428 return rc;
589d2739
HW
5429
5430 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
a0e30392 5431 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
589d2739
HW
5432}
5433
5434static int dm_enable_vblank(struct drm_crtc *crtc)
5435{
5436 return dm_set_vblank(crtc, true);
5437}
5438
5439static void dm_disable_vblank(struct drm_crtc *crtc)
5440{
5441 dm_set_vblank(crtc, false);
5442}
5443
e7b07cee
HW
5444/* Implemented only the options currently available for the driver */
5445static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5446 .reset = dm_crtc_reset_state,
5447 .destroy = amdgpu_dm_crtc_destroy,
5448 .gamma_set = drm_atomic_helper_legacy_gamma_set,
5449 .set_config = drm_atomic_helper_set_config,
5450 .page_flip = drm_atomic_helper_page_flip,
5451 .atomic_duplicate_state = dm_crtc_duplicate_state,
5452 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 5453 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 5454 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 5455 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 5456 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
5457 .enable_vblank = dm_enable_vblank,
5458 .disable_vblank = dm_disable_vblank,
e3eff4b5 5459 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
c920888c
WL
5460#ifdef CONFIG_DEBUG_FS
5461 .atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
5462 .atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
5463#endif
e7b07cee
HW
5464};
5465
5466static enum drm_connector_status
5467amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5468{
5469 bool connected;
c84dec2f 5470 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 5471
1f6010a9
DF
5472 /*
5473 * Notes:
e7b07cee
HW
5474 * 1. This interface is NOT called in the context of the HPD irq.
5475 * 2. This interface *is called* in the context of a user-mode ioctl, which
1f6010a9
DF
5476 * makes it a bad place for *any* MST-related activity.
5477 */
e7b07cee 5478
8580d60b
HW
5479 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5480 !aconnector->fake_enable)
e7b07cee
HW
5481 connected = (aconnector->dc_sink != NULL);
5482 else
5483 connected = (aconnector->base.force == DRM_FORCE_ON);
5484
0f877894
OV
5485 update_subconnector_property(aconnector);
5486
e7b07cee
HW
5487 return (connected ? connector_status_connected :
5488 connector_status_disconnected);
5489}
5490
3ee6b26b
AD
5491int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5492 struct drm_connector_state *connector_state,
5493 struct drm_property *property,
5494 uint64_t val)
e7b07cee
HW
5495{
5496 struct drm_device *dev = connector->dev;
1348969a 5497 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
5498 struct dm_connector_state *dm_old_state =
5499 to_dm_connector_state(connector->state);
5500 struct dm_connector_state *dm_new_state =
5501 to_dm_connector_state(connector_state);
5502
5503 int ret = -EINVAL;
5504
5505 if (property == dev->mode_config.scaling_mode_property) {
5506 enum amdgpu_rmx_type rmx_type;
5507
5508 switch (val) {
5509 case DRM_MODE_SCALE_CENTER:
5510 rmx_type = RMX_CENTER;
5511 break;
5512 case DRM_MODE_SCALE_ASPECT:
5513 rmx_type = RMX_ASPECT;
5514 break;
5515 case DRM_MODE_SCALE_FULLSCREEN:
5516 rmx_type = RMX_FULL;
5517 break;
5518 case DRM_MODE_SCALE_NONE:
5519 default:
5520 rmx_type = RMX_OFF;
5521 break;
5522 }
5523
5524 if (dm_old_state->scaling == rmx_type)
5525 return 0;
5526
5527 dm_new_state->scaling = rmx_type;
5528 ret = 0;
5529 } else if (property == adev->mode_info.underscan_hborder_property) {
5530 dm_new_state->underscan_hborder = val;
5531 ret = 0;
5532 } else if (property == adev->mode_info.underscan_vborder_property) {
5533 dm_new_state->underscan_vborder = val;
5534 ret = 0;
5535 } else if (property == adev->mode_info.underscan_property) {
5536 dm_new_state->underscan_enable = val;
5537 ret = 0;
c1ee92f9
DF
5538 } else if (property == adev->mode_info.abm_level_property) {
5539 dm_new_state->abm_level = val;
5540 ret = 0;
e7b07cee
HW
5541 }
5542
5543 return ret;
5544}
5545
3ee6b26b
AD
5546int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5547 const struct drm_connector_state *state,
5548 struct drm_property *property,
5549 uint64_t *val)
e7b07cee
HW
5550{
5551 struct drm_device *dev = connector->dev;
1348969a 5552 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
5553 struct dm_connector_state *dm_state =
5554 to_dm_connector_state(state);
5555 int ret = -EINVAL;
5556
5557 if (property == dev->mode_config.scaling_mode_property) {
5558 switch (dm_state->scaling) {
5559 case RMX_CENTER:
5560 *val = DRM_MODE_SCALE_CENTER;
5561 break;
5562 case RMX_ASPECT:
5563 *val = DRM_MODE_SCALE_ASPECT;
5564 break;
5565 case RMX_FULL:
5566 *val = DRM_MODE_SCALE_FULLSCREEN;
5567 break;
5568 case RMX_OFF:
5569 default:
5570 *val = DRM_MODE_SCALE_NONE;
5571 break;
5572 }
5573 ret = 0;
5574 } else if (property == adev->mode_info.underscan_hborder_property) {
5575 *val = dm_state->underscan_hborder;
5576 ret = 0;
5577 } else if (property == adev->mode_info.underscan_vborder_property) {
5578 *val = dm_state->underscan_vborder;
5579 ret = 0;
5580 } else if (property == adev->mode_info.underscan_property) {
5581 *val = dm_state->underscan_enable;
5582 ret = 0;
c1ee92f9
DF
5583 } else if (property == adev->mode_info.abm_level_property) {
5584 *val = dm_state->abm_level;
5585 ret = 0;
e7b07cee 5586 }
c1ee92f9 5587
e7b07cee
HW
5588 return ret;
5589}
5590
526c654a
ED
5591static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5592{
5593 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5594
5595 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5596}
5597
7578ecda 5598static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 5599{
c84dec2f 5600 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 5601 const struct dc_link *link = aconnector->dc_link;
1348969a 5602 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 5603 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 5604
5dff80bd
AG
5605 /*
5606 * Call only if mst_mgr was initialized before, since it's not done
5607 * for all connector types.
5608 */
5609 if (aconnector->mst_mgr.dev)
5610 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5611
e7b07cee
HW
5612#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5613 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5614
89fc8d4e 5615 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5cd29ed0
HW
5616 link->type != dc_connection_none &&
5617 dm->backlight_dev) {
5618 backlight_device_unregister(dm->backlight_dev);
5619 dm->backlight_dev = NULL;
e7b07cee
HW
5620 }
5621#endif
dcd5fb82
MF
5622
5623 if (aconnector->dc_em_sink)
5624 dc_sink_release(aconnector->dc_em_sink);
5625 aconnector->dc_em_sink = NULL;
5626 if (aconnector->dc_sink)
5627 dc_sink_release(aconnector->dc_sink);
5628 aconnector->dc_sink = NULL;
5629
e86e8947 5630 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
5631 drm_connector_unregister(connector);
5632 drm_connector_cleanup(connector);
526c654a
ED
5633 if (aconnector->i2c) {
5634 i2c_del_adapter(&aconnector->i2c->base);
5635 kfree(aconnector->i2c);
5636 }
7daec99f 5637 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 5638
e7b07cee
HW
5639 kfree(connector);
5640}
5641
5642void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5643{
5644 struct dm_connector_state *state =
5645 to_dm_connector_state(connector->state);
5646
df099b9b
LSL
5647 if (connector->state)
5648 __drm_atomic_helper_connector_destroy_state(connector->state);
5649
e7b07cee
HW
5650 kfree(state);
5651
5652 state = kzalloc(sizeof(*state), GFP_KERNEL);
5653
5654 if (state) {
5655 state->scaling = RMX_OFF;
5656 state->underscan_enable = false;
5657 state->underscan_hborder = 0;
5658 state->underscan_vborder = 0;
01933ba4 5659 state->base.max_requested_bpc = 8;
3261e013
ML
5660 state->vcpi_slots = 0;
5661 state->pbn = 0;
c3e50f89
NK
5662 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5663 state->abm_level = amdgpu_dm_abm_level;
5664
df099b9b 5665 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
5666 }
5667}
5668
3ee6b26b
AD
5669struct drm_connector_state *
5670amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
5671{
5672 struct dm_connector_state *state =
5673 to_dm_connector_state(connector->state);
5674
5675 struct dm_connector_state *new_state =
5676 kmemdup(state, sizeof(*state), GFP_KERNEL);
5677
98e6436d
AK
5678 if (!new_state)
5679 return NULL;
e7b07cee 5680
98e6436d
AK
5681 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5682
5683 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 5684 new_state->abm_level = state->abm_level;
922454c2
NK
5685 new_state->scaling = state->scaling;
5686 new_state->underscan_enable = state->underscan_enable;
5687 new_state->underscan_hborder = state->underscan_hborder;
5688 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
5689 new_state->vcpi_slots = state->vcpi_slots;
5690 new_state->pbn = state->pbn;
98e6436d 5691 return &new_state->base;
e7b07cee
HW
5692}
5693
14f04fa4
AD
5694static int
5695amdgpu_dm_connector_late_register(struct drm_connector *connector)
5696{
5697 struct amdgpu_dm_connector *amdgpu_dm_connector =
5698 to_amdgpu_dm_connector(connector);
00a8037e 5699 int r;
14f04fa4 5700
00a8037e
AD
5701 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5702 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5703 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5704 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5705 if (r)
5706 return r;
5707 }
5708
5709#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
5710 connector_debugfs_init(amdgpu_dm_connector);
5711#endif
5712
5713 return 0;
5714}
5715
e7b07cee
HW
5716static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5717 .reset = amdgpu_dm_connector_funcs_reset,
5718 .detect = amdgpu_dm_connector_detect,
5719 .fill_modes = drm_helper_probe_single_connector_modes,
5720 .destroy = amdgpu_dm_connector_destroy,
5721 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5722 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5723 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 5724 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 5725 .late_register = amdgpu_dm_connector_late_register,
526c654a 5726 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
5727};
5728
e7b07cee
HW
5729static int get_modes(struct drm_connector *connector)
5730{
5731 return amdgpu_dm_connector_get_modes(connector);
5732}
5733
c84dec2f 5734static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5735{
5736 struct dc_sink_init_data init_params = {
5737 .link = aconnector->dc_link,
5738 .sink_signal = SIGNAL_TYPE_VIRTUAL
5739 };
70e8ffc5 5740 struct edid *edid;
e7b07cee 5741
a89ff457 5742 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
5743 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5744 aconnector->base.name);
5745
5746 aconnector->base.force = DRM_FORCE_OFF;
5747 aconnector->base.override_edid = false;
5748 return;
5749 }
5750
70e8ffc5
HW
5751 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5752
e7b07cee
HW
5753 aconnector->edid = edid;
5754
5755 aconnector->dc_em_sink = dc_link_add_remote_sink(
5756 aconnector->dc_link,
5757 (uint8_t *)edid,
5758 (edid->extensions + 1) * EDID_LENGTH,
5759 &init_params);
5760
dcd5fb82 5761 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
5762 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5763 aconnector->dc_link->local_sink :
5764 aconnector->dc_em_sink;
dcd5fb82
MF
5765 dc_sink_retain(aconnector->dc_sink);
5766 }
e7b07cee
HW
5767}
5768
c84dec2f 5769static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5770{
5771 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5772
1f6010a9
DF
5773 /*
5774 * In the case of a headless boot with force on for a DP managed connector,
e7b07cee
HW
5775 * these settings have to be != 0 to get an initial modeset.
5776 */
5777 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5778 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5779 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5780 }
5781
5782
5783 aconnector->base.override_edid = true;
5784 create_eml_sink(aconnector);
5785}
5786
cbd14ae7
SW
5787static struct dc_stream_state *
5788create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5789 const struct drm_display_mode *drm_mode,
5790 const struct dm_connector_state *dm_state,
5791 const struct dc_stream_state *old_stream)
5792{
5793 struct drm_connector *connector = &aconnector->base;
1348969a 5794 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 5795 struct dc_stream_state *stream;
4b7da34b
SW
5796 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5797 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
5798 enum dc_status dc_result = DC_OK;
5799
5800 do {
5801 stream = create_stream_for_sink(aconnector, drm_mode,
5802 dm_state, old_stream,
5803 requested_bpc);
5804 if (stream == NULL) {
5805 DRM_ERROR("Failed to create stream for sink!\n");
5806 break;
5807 }
5808
5809 dc_result = dc_validate_stream(adev->dm.dc, stream);
5810
5811 if (dc_result != DC_OK) {
74a16675 5812 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
5813 drm_mode->hdisplay,
5814 drm_mode->vdisplay,
5815 drm_mode->clock,
74a16675
RS
5816 dc_result,
5817 dc_status_to_str(dc_result));
cbd14ae7
SW
5818
5819 dc_stream_release(stream);
5820 stream = NULL;
5821 requested_bpc -= 2; /* lower bpc to retry validation */
5822 }
5823
5824 } while (stream == NULL && requested_bpc >= 6);
5825
5826 return stream;
5827}
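/*
 * Fallback sketch: with max_requested_bpc = 10, validation is attempted at
 * 10 bpc, then 8, then 6; the first stream DC accepts is returned, and NULL
 * only if even the 6 bpc attempt fails.
 */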
5828
ba9ca088 5829enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 5830 struct drm_display_mode *mode)
e7b07cee
HW
5831{
5832 int result = MODE_ERROR;
5833 struct dc_sink *dc_sink;
e7b07cee 5834 /* TODO: Unhardcode stream count */
0971c40e 5835 struct dc_stream_state *stream;
c84dec2f 5836 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
5837
5838 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5839 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5840 return result;
5841
1f6010a9
DF
5842 /*
5843 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
5844 * EDID mgmt
5845 */
5846 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5847 !aconnector->dc_em_sink)
5848 handle_edid_mgmt(aconnector);
5849
c84dec2f 5850 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 5851
ad975f44
VL
5852 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5853 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
5854 DRM_ERROR("dc_sink is NULL!\n");
5855 goto fail;
5856 }
5857
cbd14ae7
SW
5858 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5859 if (stream) {
5860 dc_stream_release(stream);
e7b07cee 5861 result = MODE_OK;
cbd14ae7 5862 }
e7b07cee
HW
5863
5864fail:
5865 /* TODO: error handling */
5866 return result;
5867}
5868
88694af9
NK
5869static int fill_hdr_info_packet(const struct drm_connector_state *state,
5870 struct dc_info_packet *out)
5871{
5872 struct hdmi_drm_infoframe frame;
5873 unsigned char buf[30]; /* 26 + 4 */
5874 ssize_t len;
5875 int ret, i;
5876
5877 memset(out, 0, sizeof(*out));
5878
5879 if (!state->hdr_output_metadata)
5880 return 0;
5881
5882 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5883 if (ret)
5884 return ret;
5885
5886 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5887 if (len < 0)
5888 return (int)len;
5889
5890 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5891 if (len != 30)
5892 return -EINVAL;
5893
5894 /* Prepare the infopacket for DC. */
5895 switch (state->connector->connector_type) {
5896 case DRM_MODE_CONNECTOR_HDMIA:
5897 out->hb0 = 0x87; /* type */
5898 out->hb1 = 0x01; /* version */
5899 out->hb2 = 0x1A; /* length */
5900 out->sb[0] = buf[3]; /* checksum */
5901 i = 1;
5902 break;
5903
5904 case DRM_MODE_CONNECTOR_DisplayPort:
5905 case DRM_MODE_CONNECTOR_eDP:
5906 out->hb0 = 0x00; /* sdp id, zero */
5907 out->hb1 = 0x87; /* type */
5908 out->hb2 = 0x1D; /* payload len - 1 */
5909 out->hb3 = (0x13 << 2); /* sdp version */
5910 out->sb[0] = 0x01; /* version */
5911 out->sb[1] = 0x1A; /* length */
5912 i = 2;
5913 break;
5914
5915 default:
5916 return -EINVAL;
5917 }
5918
5919 memcpy(&out->sb[i], &buf[4], 26);
5920 out->valid = true;
5921
5922 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5923 sizeof(out->sb), false);
5924
5925 return 0;
5926}
5927
5928static bool
5929is_hdr_metadata_different(const struct drm_connector_state *old_state,
5930 const struct drm_connector_state *new_state)
5931{
5932 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5933 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5934
5935 if (old_blob != new_blob) {
5936 if (old_blob && new_blob &&
5937 old_blob->length == new_blob->length)
5938 return memcmp(old_blob->data, new_blob->data,
5939 old_blob->length);
5940
5941 return true;
5942 }
5943
5944 return false;
5945}
5946
5947static int
5948amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 5949 struct drm_atomic_state *state)
88694af9 5950{
51e857af
SP
5951 struct drm_connector_state *new_con_state =
5952 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
5953 struct drm_connector_state *old_con_state =
5954 drm_atomic_get_old_connector_state(state, conn);
5955 struct drm_crtc *crtc = new_con_state->crtc;
5956 struct drm_crtc_state *new_crtc_state;
5957 int ret;
5958
e8a98235
RS
5959 trace_amdgpu_dm_connector_atomic_check(new_con_state);
5960
88694af9
NK
5961 if (!crtc)
5962 return 0;
5963
5964 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5965 struct dc_info_packet hdr_infopacket;
5966
5967 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5968 if (ret)
5969 return ret;
5970
5971 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5972 if (IS_ERR(new_crtc_state))
5973 return PTR_ERR(new_crtc_state);
5974
5975 /*
5976 * DC considers the stream backends changed if the
5977 * static metadata changes. Forcing the modeset also
5978 * gives a simple way for userspace to switch from
b232d4ed
NK
5979 * 8bpc to 10bpc when setting the metadata to enter
5980 * or exit HDR.
5981 *
5982 * Changing the static metadata after it's been
5983 * set is permissible, however. So only force a
5984 * modeset if we're entering or exiting HDR.
88694af9 5985 */
b232d4ed
NK
5986 new_crtc_state->mode_changed =
5987 !old_con_state->hdr_output_metadata ||
5988 !new_con_state->hdr_output_metadata;
88694af9
NK
5989 }
5990
5991 return 0;
5992}
5993
e7b07cee
HW
5994static const struct drm_connector_helper_funcs
5995amdgpu_dm_connector_helper_funcs = {
5996 /*
1f6010a9 5997 * When hotplugging a second, bigger display in FB console mode, higher-resolution
b830ebc9 5998 * modes will be filtered out by drm_mode_validate_size(), and those modes
1f6010a9 5999 * are missing after the user starts lightdm. So we need to renew the mode list
b830ebc9
HW
6000 * in the get_modes callback, not just return the mode count.
6001 */
e7b07cee
HW
6002 .get_modes = get_modes,
6003 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 6004 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
6005};
6006
6007static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6008{
6009}
6010
d6ef9b41 6011static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
6012{
6013 struct drm_atomic_state *state = new_crtc_state->state;
6014 struct drm_plane *plane;
6015 int num_active = 0;
6016
6017 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6018 struct drm_plane_state *new_plane_state;
6019
6020 /* Cursor planes are "fake". */
6021 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6022 continue;
6023
6024 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6025
6026 if (!new_plane_state) {
6027 /*
6028 * The plane is enabled on the CRTC and hasn't changed
6029 * state. This means that it previously passed
6030 * validation and is therefore enabled.
6031 */
6032 num_active += 1;
6033 continue;
6034 }
6035
6036 /* We need a framebuffer to be considered enabled. */
6037 num_active += (new_plane_state->fb != NULL);
6038 }
6039
d6ef9b41
NK
6040 return num_active;
6041}
6042
8fe684e9
NK
6043static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6044 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
6045{
6046 struct dm_crtc_state *dm_new_crtc_state =
6047 to_dm_crtc_state(new_crtc_state);
6048
6049 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
6050
6051 if (!dm_new_crtc_state->stream)
6052 return;
6053
6054 dm_new_crtc_state->active_planes =
6055 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
6056}
6057
3ee6b26b 6058static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 6059 struct drm_atomic_state *state)
e7b07cee 6060{
29b77ad7
MR
6061 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6062 crtc);
1348969a 6063 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 6064 struct dc *dc = adev->dm.dc;
29b77ad7 6065 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
6066 int ret = -EINVAL;
6067
5b8c5969 6068 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 6069
29b77ad7 6070 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 6071
9b690ef3 6072 if (unlikely(!dm_crtc_state->stream &&
29b77ad7 6073 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
6074 WARN_ON(1);
6075 return ret;
6076 }
6077
bc92c065 6078 /*
b836a274
MD
6079 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6080 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6081 * planes are disabled, which is not supported by the hardware. And there is legacy
6082 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 6083 */
29b77ad7 6084 if (crtc_state->enable &&
ea9522f5
SS
6085 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6086 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 6087 return -EINVAL;
ea9522f5 6088 }
c14a005c 6089
b836a274
MD
6090 /* In some use cases, like reset, no stream is attached */
6091 if (!dm_crtc_state->stream)
6092 return 0;
6093
62c933f9 6094 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
6095 return 0;
6096
ea9522f5 6097 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
6098 return ret;
6099}
6100
3ee6b26b
AD
6101static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6102 const struct drm_display_mode *mode,
6103 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
6104{
6105 return true;
6106}
6107
6108static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6109 .disable = dm_crtc_helper_disable,
6110 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
6111 .mode_fixup = dm_crtc_helper_mode_fixup,
6112 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
6113};
6114
6115static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6116{
6117
6118}
6119
3261e013
ML
6120static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6121{
6122 switch (display_color_depth) {
6123 case COLOR_DEPTH_666:
6124 return 6;
6125 case COLOR_DEPTH_888:
6126 return 8;
6127 case COLOR_DEPTH_101010:
6128 return 10;
6129 case COLOR_DEPTH_121212:
6130 return 12;
6131 case COLOR_DEPTH_141414:
6132 return 14;
6133 case COLOR_DEPTH_161616:
6134 return 16;
6135 default:
6136 break;
6137 }
6138 return 0;
6139}
6140
3ee6b26b
AD
6141static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6142 struct drm_crtc_state *crtc_state,
6143 struct drm_connector_state *conn_state)
e7b07cee 6144{
3261e013
ML
6145 struct drm_atomic_state *state = crtc_state->state;
6146 struct drm_connector *connector = conn_state->connector;
6147 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6148 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6149 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6150 struct drm_dp_mst_topology_mgr *mst_mgr;
6151 struct drm_dp_mst_port *mst_port;
6152 enum dc_color_depth color_depth;
6153 int clock, bpp = 0;
1bc22f20 6154 bool is_y420 = false;
3261e013
ML
6155
6156 if (!aconnector->port || !aconnector->dc_sink)
6157 return 0;
6158
6159 mst_port = aconnector->port;
6160 mst_mgr = &aconnector->mst_port->mst_mgr;
6161
6162 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6163 return 0;
6164
6165 if (!state->duplicated) {
cbd14ae7 6166 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
6167 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6168 aconnector->force_yuv420_output;
cbd14ae7
SW
6169 color_depth = convert_color_depth_from_display_info(connector,
6170 is_y420,
6171 max_bpc);
3261e013
ML
6172 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6173 clock = adjusted_mode->clock;
dc48529f 6174 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
6175 }
6176 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6177 mst_mgr,
6178 mst_port,
1c6c1cb5 6179 dm_new_connector_state->pbn,
03ca9600 6180 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
6181 if (dm_new_connector_state->vcpi_slots < 0) {
6182 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6183 return dm_new_connector_state->vcpi_slots;
6184 }
e7b07cee
HW
6185 return 0;
6186}
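/*
 * Rough PBN example for the check above (uncompressed 1080p60 at 8 bpc):
 * bpp = 8 * 3 = 24 and clock = 148500 kHz; with the 64/54 MTP framing and
 * the ~0.6% margin drm_dp_calc_pbn_mode() applies, that works out to
 * roughly 532 PBN.
 */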
6187
6188const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6189 .disable = dm_encoder_helper_disable,
6190 .atomic_check = dm_encoder_helper_atomic_check
6191};
6192
d9fe1a4c 6193#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74
ML
6194static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6195 struct dc_state *dc_state)
6196{
6197 struct dc_stream_state *stream = NULL;
6198 struct drm_connector *connector;
6199 struct drm_connector_state *new_con_state, *old_con_state;
6200 struct amdgpu_dm_connector *aconnector;
6201 struct dm_connector_state *dm_conn_state;
6202 int i, j, clock, bpp;
6203 int vcpi, pbn_div, pbn = 0;
6204
6205 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6206
6207 aconnector = to_amdgpu_dm_connector(connector);
6208
6209 if (!aconnector->port)
6210 continue;
6211
6212 if (!new_con_state || !new_con_state->crtc)
6213 continue;
6214
6215 dm_conn_state = to_dm_connector_state(new_con_state);
6216
6217 for (j = 0; j < dc_state->stream_count; j++) {
6218 stream = dc_state->streams[j];
6219 if (!stream)
6220 continue;
6221
6222 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6223 break;
6224
6225 stream = NULL;
6226 }
6227
6228 if (!stream)
6229 continue;
6230
6231 if (stream->timing.flags.DSC != 1) {
6232 drm_dp_mst_atomic_enable_dsc(state,
6233 aconnector->port,
6234 dm_conn_state->pbn,
6235 0,
6236 false);
6237 continue;
6238 }
6239
6240 pbn_div = dm_mst_get_pbn_divider(stream->link);
6241 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6242 clock = stream->timing.pix_clk_100hz / 10;
6243 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6244 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6245 aconnector->port,
6246 pbn, pbn_div,
6247 true);
6248 if (vcpi < 0)
6249 return vcpi;
6250
6251 dm_conn_state->pbn = pbn;
6252 dm_conn_state->vcpi_slots = vcpi;
6253 }
6254 return 0;
6255}
d9fe1a4c 6256#endif
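/*
 * Note on the DSC branch above: dsc_cfg.bits_per_pixel is a fractional
 * value in units of 1/16 bpp, which is why drm_dp_calc_pbn_mode() is
 * called with dsc = true; the helper then scales the bpp argument down
 * accordingly.
 */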
29b9ba74 6257
e7b07cee
HW
6258static void dm_drm_plane_reset(struct drm_plane *plane)
6259{
6260 struct dm_plane_state *amdgpu_state = NULL;
6261
6262 if (plane->state)
6263 plane->funcs->atomic_destroy_state(plane, plane->state);
6264
6265 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 6266 WARN_ON(amdgpu_state == NULL);
1f6010a9 6267
7ddaef96
NK
6268 if (amdgpu_state)
6269 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
6270}
6271
6272static struct drm_plane_state *
6273dm_drm_plane_duplicate_state(struct drm_plane *plane)
6274{
6275 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6276
6277 old_dm_plane_state = to_dm_plane_state(plane->state);
6278 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6279 if (!dm_plane_state)
6280 return NULL;
6281
6282 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6283
3be5262e
HW
6284 if (old_dm_plane_state->dc_state) {
6285 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6286 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
6287 }
6288
6289 return &dm_plane_state->base;
6290}
6291
dfd84d90 6292static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 6293 struct drm_plane_state *state)
e7b07cee
HW
6294{
6295 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6296
3be5262e
HW
6297 if (dm_plane_state->dc_state)
6298 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 6299
0627bbd3 6300 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
6301}
6302
6303static const struct drm_plane_funcs dm_plane_funcs = {
6304 .update_plane = drm_atomic_helper_update_plane,
6305 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 6306 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
6307 .reset = dm_drm_plane_reset,
6308 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6309 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 6310 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
6311};
6312
3ee6b26b
AD
6313static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6314 struct drm_plane_state *new_state)
e7b07cee
HW
6315{
6316 struct amdgpu_framebuffer *afb;
6317 struct drm_gem_object *obj;
5d43be0c 6318 struct amdgpu_device *adev;
e7b07cee 6319 struct amdgpu_bo *rbo;
e7b07cee 6320 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
6321 struct list_head list;
6322 struct ttm_validate_buffer tv;
6323 struct ww_acquire_ctx ticket;
5d43be0c
CK
6324 uint32_t domain;
6325 int r;
e7b07cee
HW
6326
6327 if (!new_state->fb) {
f1ad2f5e 6328 DRM_DEBUG_DRIVER("No FB bound\n");
e7b07cee
HW
6329 return 0;
6330 }
6331
6332 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 6333 obj = new_state->fb->obj[0];
e7b07cee 6334 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 6335 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
6336 INIT_LIST_HEAD(&list);
6337
6338 tv.bo = &rbo->tbo;
6339 tv.num_shared = 1;
6340 list_add(&tv.head, &list);
6341
9165fb87 6342 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
6343 if (r) {
6344 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 6345 return r;
0f257b09 6346 }
e7b07cee 6347
5d43be0c 6348 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 6349 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
6350 else
6351 domain = AMDGPU_GEM_DOMAIN_VRAM;
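/*
 * Cursor surfaces must be pinned in VRAM, since the display hardware
 * fetches them directly; other planes may use any domain the display
 * supports, e.g. GTT on APUs that allow scanout from system memory.
 */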
e7b07cee 6352
7b7c6c81 6353 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 6354 if (unlikely(r != 0)) {
30b7c614
HW
6355 if (r != -ERESTARTSYS)
6356 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 6357 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
6358 return r;
6359 }
6360
bb812f1e
JZ
6361 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6362 if (unlikely(r != 0)) {
6363 amdgpu_bo_unpin(rbo);
0f257b09 6364 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6365 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
6366 return r;
6367 }
7df7e505 6368
0f257b09 6369 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6370
7b7c6c81 6371 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
6372
6373 amdgpu_bo_ref(rbo);
6374
cf322b49
NK
6375 /**
6376 * We don't do surface updates on planes that have been newly created,
6377 * but we also don't have the afb->address during atomic check.
6378 *
6379 * Fill in buffer attributes depending on the address here, but only on
6380 * newly created planes since they're not being used by DC yet and this
6381 * won't modify global state.
6382 */
6383 dm_plane_state_old = to_dm_plane_state(plane->state);
6384 dm_plane_state_new = to_dm_plane_state(new_state);
6385
3be5262e 6386 if (dm_plane_state_new->dc_state &&
cf322b49
NK
6387 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6388 struct dc_plane_state *plane_state =
6389 dm_plane_state_new->dc_state;
6390 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 6391
320932bf 6392 fill_plane_buffer_attributes(
695af5f9 6393 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 6394 afb->tiling_flags,
cf322b49
NK
6395 &plane_state->tiling_info, &plane_state->plane_size,
6396 &plane_state->dcc, &plane_state->address,
6eed95b0 6397 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
6398 }
6399
e7b07cee
HW
6400 return 0;
6401}
6402
3ee6b26b
AD
6403static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6404 struct drm_plane_state *old_state)
e7b07cee
HW
6405{
6406 struct amdgpu_bo *rbo;
e7b07cee
HW
6407 int r;
6408
6409 if (!old_state->fb)
6410 return;
6411
e68d14dd 6412 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
6413 r = amdgpu_bo_reserve(rbo, false);
6414 if (unlikely(r)) {
6415 DRM_ERROR("failed to reserve rbo before unpin\n");
6416 return;
b830ebc9
HW
6417 }
6418
6419 amdgpu_bo_unpin(rbo);
6420 amdgpu_bo_unreserve(rbo);
6421 amdgpu_bo_unref(&rbo);
e7b07cee
HW
6422}
6423
8c44515b
AP
6424static int dm_plane_helper_check_state(struct drm_plane_state *state,
6425 struct drm_crtc_state *new_crtc_state)
6426{
6427 int max_downscale = 0;
6428 int max_upscale = INT_MAX;
6429
6430 /* TODO: These should be checked against DC plane caps */
6431 return drm_atomic_helper_check_plane_state(
6432 state, new_crtc_state, max_downscale, max_upscale, true, true);
6433}
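/*
 * The min/max scale arguments of drm_atomic_helper_check_plane_state()
 * are 16.16 fixed point; passing 0 and INT_MAX effectively disables the
 * DRM-side scaling limits and defers scaling validation to DC (see the
 * TODO above).
 */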
6434
7578ecda
AD
6435static int dm_plane_atomic_check(struct drm_plane *plane,
6436 struct drm_plane_state *state)
cbd19488 6437{
1348969a 6438 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 6439 struct dc *dc = adev->dm.dc;
78171832 6440 struct dm_plane_state *dm_plane_state;
695af5f9 6441 struct dc_scaling_info scaling_info;
8c44515b 6442 struct drm_crtc_state *new_crtc_state;
695af5f9 6443 int ret;
78171832 6444
e8a98235
RS
6445 trace_amdgpu_dm_plane_atomic_check(state);
6446
78171832 6447 dm_plane_state = to_dm_plane_state(state);
cbd19488 6448
3be5262e 6449 if (!dm_plane_state->dc_state)
9a3329b1 6450 return 0;
cbd19488 6451
8c44515b
AP
6452 new_crtc_state =
6453 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6454 if (!new_crtc_state)
6455 return -EINVAL;
6456
6457 ret = dm_plane_helper_check_state(state, new_crtc_state);
6458 if (ret)
6459 return ret;
6460
695af5f9
NK
6461 ret = fill_dc_scaling_info(state, &scaling_info);
6462 if (ret)
6463 return ret;
a05bcff1 6464
62c933f9 6465 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
6466 return 0;
6467
6468 return -EINVAL;
6469}
6470
674e78ac
NK
6471static int dm_plane_atomic_async_check(struct drm_plane *plane,
6472 struct drm_plane_state *new_plane_state)
6473{
6474 /* Only support async updates on cursor planes. */
6475 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6476 return -EINVAL;
6477
6478 return 0;
6479}
6480
6481static void dm_plane_atomic_async_update(struct drm_plane *plane,
6482 struct drm_plane_state *new_state)
6483{
6484 struct drm_plane_state *old_state =
6485 drm_atomic_get_old_plane_state(new_state->state, plane);
6486
e8a98235
RS
6487 trace_amdgpu_dm_atomic_update_cursor(new_state);
6488
332af874 6489 swap(plane->state->fb, new_state->fb);
674e78ac
NK
6490
6491 plane->state->src_x = new_state->src_x;
6492 plane->state->src_y = new_state->src_y;
6493 plane->state->src_w = new_state->src_w;
6494 plane->state->src_h = new_state->src_h;
6495 plane->state->crtc_x = new_state->crtc_x;
6496 plane->state->crtc_y = new_state->crtc_y;
6497 plane->state->crtc_w = new_state->crtc_w;
6498 plane->state->crtc_h = new_state->crtc_h;
6499
6500 handle_cursor_update(plane, old_state);
6501}
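/*
 * Async path: the new cursor fb and coordinates are applied to the current
 * plane state in place, without a full atomic commit, and then pushed to
 * DC via handle_cursor_update().
 */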
6502
e7b07cee
HW
6503static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6504 .prepare_fb = dm_plane_helper_prepare_fb,
6505 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 6506 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
6507 .atomic_async_check = dm_plane_atomic_async_check,
6508 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
6509};
6510
6511/*
6512 * TODO: these are currently initialized to rgb formats only.
6513 * For future use cases we should either initialize them dynamically based on
6514 * plane capabilities, or initialize this array to all formats, so the internal drm
1f6010a9 6515 * check will succeed, and let DC implement the proper check.
e7b07cee 6516 */
d90371b0 6517static const uint32_t rgb_formats[] = {
e7b07cee
HW
6518 DRM_FORMAT_XRGB8888,
6519 DRM_FORMAT_ARGB8888,
6520 DRM_FORMAT_RGBA8888,
6521 DRM_FORMAT_XRGB2101010,
6522 DRM_FORMAT_XBGR2101010,
6523 DRM_FORMAT_ARGB2101010,
6524 DRM_FORMAT_ABGR2101010,
bcd47f60
MR
6525 DRM_FORMAT_XBGR8888,
6526 DRM_FORMAT_ABGR8888,
46dd9ff7 6527 DRM_FORMAT_RGB565,
e7b07cee
HW
6528};
6529
0d579c7e
NK
6530static const uint32_t overlay_formats[] = {
6531 DRM_FORMAT_XRGB8888,
6532 DRM_FORMAT_ARGB8888,
6533 DRM_FORMAT_RGBA8888,
6534 DRM_FORMAT_XBGR8888,
6535 DRM_FORMAT_ABGR8888,
7267a1a9 6536 DRM_FORMAT_RGB565
e7b07cee
HW
6537};
6538
6539static const u32 cursor_formats[] = {
6540 DRM_FORMAT_ARGB8888
6541};

static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}
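
/*
 * Illustrative usage sketch (not part of the driver): callers hand in a
 * fixed-size array and get back the number of entries filled, exactly as
 * amdgpu_dm_plane_init() does below.
 */
#if 0
	uint32_t formats[32];
	int num_formats = get_plane_formats(plane, plane_cap, formats,
					    ARRAY_SIZE(formats));
#endif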

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;
	unsigned int supported_rotations;
	uint64_t *modifiers = NULL;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
	if (res)
		return res;

	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       modifiers, plane->type, NULL);
	kfree(modifiers);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	if (dm->adev->asic_type >= CHIP_BONAIRE &&
	    plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
				       struct amdgpu_crtc *acrtc)
{
	drm_object_attach_property(&acrtc->base.base,
				   dm->crc_win_x_start_property,
				   0);
	drm_object_attach_property(&acrtc->base.base,
				   dm->crc_win_y_start_property,
				   0);
	drm_object_attach_property(&acrtc->base.base,
				   dm->crc_win_x_end_property,
				   0);
	drm_object_attach_property(&acrtc->base.base,
				   dm->crc_win_y_end_property,
				   0);
}
#endif

static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the crtc state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
#ifdef CONFIG_DEBUG_FS
	attach_crtc_crc_properties(dm, acrtc);
#endif
	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}


static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;

	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}

	}
}

static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;

}

static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}
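
/*
 * Worked example: for a panel whose native mode is 1920x1080, the loop
 * above adds 640x480 through 1680x1050 (unless already probed) and skips
 * 1600x1200, 1920x1080 itself and 1920x1200 - a candidate must fit within
 * the native mode in both dimensions and must not be identical to it.
 */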

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have
		 * more than one preferred mode. Modes that appear later
		 * in the probed mode list could be of higher, preferred
		 * resolution - for example, a 3840x2160 resolution in the
		 * base EDID preferred timing, and a 4096x2160 preferred
		 * resolution in a DID extension block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot-plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}

static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}
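
/*
 * The adapter built above is registered with the I2C core by
 * amdgpu_dm_connector_init() below via i2c_add_adapter(); transfers on it
 * are then routed through amdgpu_dm_i2c_algo, i.e. amdgpu_dm_i2c_xfer(),
 * into DC's dc_submit_i2c().
 */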

/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}

int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}
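
/*
 * Illustrative equivalent (not part of the driver): the switch above
 * simply builds a contiguous CRTC bitmask, i.e. (1 << num_crtc) - 1,
 * saturating at the six-CRTC mask 0x3f.
 */
#if 0
	return (1 << min(adev->mode_info.num_crtc, 6)) - 1;
#endif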

static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}

static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
	} else {

		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}
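
/*
 * Note the ordering above: on enable, vblank is switched on before the
 * pageflip irq reference is taken; on disable, the reference is dropped
 * first and vblank is switched off last, mirroring the enable sequence.
 */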

static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/**
	 * This reads the current state for the IRQ and force-reapplies
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}

static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}
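
/*
 * In short: a scaling-mode change, a change of the underscan borders, or
 * toggling underscan while the relevant borders are non-zero all count
 * as "different" and therefore force a stream update.
 */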

#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handle: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re-enabled, ignore this.
	 *
	 * Handles: ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since the old state will always be 0 (UNDESIRED)
	 * and the restored state will be ENABLED.
	 *
	 * Handles: UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check if something is connected/enabled; otherwise we would start
	 * hdcp while nothing is connected/enabled (hot-plug, headless S3,
	 * dpms).
	 *
	 * Handles: DESIRED -> DESIRED (Special case)
	 */
	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/*
	 * Handles: UNDESIRED -> UNDESIRED
	 *          DESIRED -> DESIRED
	 *          ENABLED -> ENABLED
	 */
	if (old_state->content_protection == state->content_protection)
		return false;

	/*
	 * Handles: UNDESIRED -> DESIRED
	 *          DESIRED -> UNDESIRED
	 *          ENABLED -> UNDESIRED
	 */
	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
		return true;

	/*
	 * Handles: DESIRED -> ENABLED
	 */
	return false;
}
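
/*
 * The decision table above returns true whenever the HDCP workqueue must
 * act (enable, re-enable or disable), and false for transitions that are
 * no-ops from the driver's point of view: identity transitions, the
 * ENABLED -> DESIRED re-enable and the DESIRED -> ENABLED completion.
 */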

#endif
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}

static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	position->enable = false;
	position->x = 0;
	position->y = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}
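
/*
 * Worked example: with max_cursor_width == 128 and crtc_x == -10, the
 * clamping above yields x == 0 and x_hotspot == 10, so DC renders the
 * cursor shifted by its hotspot instead of being handed a negative
 * position.
 */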

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}

static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
			 acrtc->crtc_id);
}

static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if the reenable happens inside the display
		 * front porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}
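
/*
 * The get/put pair above stays balanced across transitions: every
 * inactive -> active switch takes exactly one vblank reference and
 * enables the vupdate irq, and the matching active -> inactive switch
 * releases both.
 */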

static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}
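
/*
 * amdgpu_dm_commit_planes() below calls this both before disabling all
 * planes and after programming them, so a cursor is never left enabled
 * on an otherwise disabled pipe.
 */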

static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do a limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold the reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
					      false,
					      msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			afb->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			afb->tmz_surface, false);

		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;

	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		}
		else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}

		/**
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
		if (acrtc_attach->base.state->event &&
		    acrtc_state->active_planes > 0) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_attach->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		/**
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(drm_to_adev(dev),
						  acrtc_attach);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
		    !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
			 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
			 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}

static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}

/*
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
e7b07cee 8065
b8592b48
LL
8066/**
8067 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8068 * @state: The atomic state to commit
8069 *
8070 * This will tell DC to commit the constructed DC state from atomic_check,
8071 * programming the hardware. Any failures here implies a hardware failure, since
8072 * atomic check should have filtered anything non-kosher.
8073 */
7578ecda 8074static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
8075{
8076 struct drm_device *dev = state->dev;
1348969a 8077 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8078 struct amdgpu_display_manager *dm = &adev->dm;
8079 struct dm_atomic_state *dm_state;
eb3dc897 8080 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 8081 uint32_t i, j;
5cc6dcbd 8082 struct drm_crtc *crtc;
0bc9706d 8083 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
8084 unsigned long flags;
8085 bool wait_for_vblank = true;
8086 struct drm_connector *connector;
c2cea706 8087 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 8088 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 8089 int crtc_disable_count = 0;
6ee90e88 8090 bool mode_set_reset_required = false;
e7b07cee 8091
e8a98235
RS
8092 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8093
e7b07cee
HW
8094 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8095
eb3dc897
NK
8096 dm_state = dm_atomic_get_new_state(state);
8097 if (dm_state && dm_state->context) {
8098 dc_state = dm_state->context;
8099 } else {
8100 /* No state changes, retain current state. */
813d20dc 8101 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
8102 ASSERT(dc_state_temp);
8103 dc_state = dc_state_temp;
8104 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8105 }
e7b07cee 8106
6d90a208
AP
8107 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8108 new_crtc_state, i) {
8109 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8110
8111 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8112
8113 if (old_crtc_state->active &&
8114 (!new_crtc_state->active ||
8115 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8116 manage_dm_interrupts(adev, acrtc, false);
8117 dc_stream_release(dm_old_crtc_state->stream);
8118 }
8119 }
8120
8976f73b
RS
8121 drm_atomic_helper_calc_timestamping_constants(state);
8122
e7b07cee 8123 /* update changed items */
0bc9706d 8124 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 8125 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8126
54d76575
LSL
8127 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8128 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 8129
f1ad2f5e 8130 DRM_DEBUG_DRIVER(
e7b07cee
HW
8131 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8132 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8133 "connectors_changed:%d\n",
8134 acrtc->crtc_id,
0bc9706d
LSL
8135 new_crtc_state->enable,
8136 new_crtc_state->active,
8137 new_crtc_state->planes_changed,
8138 new_crtc_state->mode_changed,
8139 new_crtc_state->active_changed,
8140 new_crtc_state->connectors_changed);
e7b07cee 8141
5c68c652
VL
8142 /* Disable cursor if disabling crtc */
8143 if (old_crtc_state->active && !new_crtc_state->active) {
8144 struct dc_cursor_position position;
8145
8146 memset(&position, 0, sizeof(position));
8147 mutex_lock(&dm->dc_lock);
8148 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8149 mutex_unlock(&dm->dc_lock);
8150 }
8151
27b3f4fc
LSL
8152 /* Copy all transient state flags into dc state */
8153 if (dm_new_crtc_state->stream) {
8154 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8155 dm_new_crtc_state->stream);
8156 }
8157
e7b07cee
HW
8158 /* handles headless hotplug case, updating new_state and
8159 * aconnector as needed
8160 */
8161
54d76575 8162 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 8163
f1ad2f5e 8164 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8165
54d76575 8166 if (!dm_new_crtc_state->stream) {
e7b07cee 8167 /*
b830ebc9
HW
8168 * This could happen because of issues with
8169 * userspace notification delivery.
8170 * In this case userspace tries to set a mode on a
1f6010a9
DF
8171 * display which is in fact disconnected.
8172 * dc_sink is NULL in this case on aconnector.
b830ebc9
HW
8173 * We expect a mode-reset request to come soon.
8174 *
8175 * This can also happen when an unplug occurs
8176 * during the resume sequence.
8177 *
8178 * In this case, we want to pretend we still
8179 * have a sink to keep the pipe running so that
8180 * hw state is consistent with the sw state
8181 */
f1ad2f5e 8182 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
8183 __func__, acrtc->base.base.id);
8184 continue;
8185 }
8186
54d76575
LSL
8187 if (dm_old_crtc_state->stream)
8188 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 8189
97028037
LP
8190 pm_runtime_get_noresume(dev->dev);
8191
e7b07cee 8192 acrtc->enabled = true;
0bc9706d
LSL
8193 acrtc->hw_mode = new_crtc_state->mode;
8194 crtc->hwmode = new_crtc_state->mode;
6ee90e88 8195 mode_set_reset_required = true;
0bc9706d 8196 } else if (modereset_required(new_crtc_state)) {
f1ad2f5e 8197 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8198 /* i.e. reset mode */
6ee90e88 8199 if (dm_old_crtc_state->stream)
54d76575 8200 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
6ee90e88 8201 mode_set_reset_required = true;
e7b07cee
HW
8202 }
8203 } /* for_each_crtc_in_state() */
8204
eb3dc897 8205 if (dc_state) {
6ee90e88 8206 /* if there was a mode set or reset, disable eDP PSR */
8207 if (mode_set_reset_required)
8208 amdgpu_dm_psr_disable_all(dm);
8209
eb3dc897 8210 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 8211 mutex_lock(&dm->dc_lock);
eb3dc897 8212 WARN_ON(!dc_commit_state(dm->dc, dc_state));
674e78ac 8213 mutex_unlock(&dm->dc_lock);
fa2123db 8214 }
e7b07cee 8215
0bc9706d 8216 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8217 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8218
54d76575 8219 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8220
54d76575 8221 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 8222 const struct dc_stream_status *status =
54d76575 8223 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8224
eb3dc897 8225 if (!status)
09f609c3
LL
8226 status = dc_stream_get_status_from_state(dc_state,
8227 dm_new_crtc_state->stream);
e7b07cee 8228 if (!status)
54d76575 8229 DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
8230 else
8231 acrtc->otg_inst = status->primary_otg_inst;
8232 }
8233 }
0c8620d6
BL
8234#ifdef CONFIG_DRM_AMD_DC_HDCP
8235 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8236 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8237 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8238 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8239
8240 new_crtc_state = NULL;
8241
8242 if (acrtc)
8243 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8244
8245 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8246
8247 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8248 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8249 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8250 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 8251 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
8252 continue;
8253 }
8254
8255 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
8256 hdcp_update_display(
8257 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 8258 new_con_state->hdcp_content_type,
b1abe558
BL
8259 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8260 : false);
0c8620d6
BL
8261 }
8262#endif
e7b07cee 8263
02d6a6fc 8264 /* Handle connector state changes */
c2cea706 8265 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
8266 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8267 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8268 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
19afd799
NC
8269 struct dc_surface_update dummy_updates[MAX_SURFACES];
8270 struct dc_stream_update stream_update;
b232d4ed 8271 struct dc_info_packet hdr_packet;
e7b07cee 8272 struct dc_stream_status *status = NULL;
b232d4ed 8273 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 8274
19afd799
NC
8275 memset(&dummy_updates, 0, sizeof(dummy_updates));
8276 memset(&stream_update, 0, sizeof(stream_update));
8277
44d09c6a 8278 if (acrtc) {
0bc9706d 8279 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
8280 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8281 }
0bc9706d 8282
e7b07cee 8283 /* Skip any modesets/resets */
0bc9706d 8284 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
8285 continue;
8286
54d76575 8287 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
8288 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8289
b232d4ed
NK
8290 scaling_changed = is_scaling_state_different(dm_new_con_state,
8291 dm_old_con_state);
8292
8293 abm_changed = dm_new_crtc_state->abm_level !=
8294 dm_old_crtc_state->abm_level;
8295
8296 hdr_changed =
8297 is_hdr_metadata_different(old_con_state, new_con_state);
8298
8299 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 8300 continue;
e7b07cee 8301
b6e881c9 8302 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 8303 if (scaling_changed) {
02d6a6fc 8304 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 8305 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 8306
02d6a6fc
DF
8307 stream_update.src = dm_new_crtc_state->stream->src;
8308 stream_update.dst = dm_new_crtc_state->stream->dst;
8309 }
8310
b232d4ed 8311 if (abm_changed) {
02d6a6fc
DF
8312 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8313
8314 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8315 }
70e8ffc5 8316
b232d4ed
NK
8317 if (hdr_changed) {
8318 fill_hdr_info_packet(new_con_state, &hdr_packet);
8319 stream_update.hdr_static_metadata = &hdr_packet;
8320 }
8321
54d76575 8322 status = dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8323 WARN_ON(!status);
3be5262e 8324 WARN_ON(!status->plane_count);
e7b07cee 8325
02d6a6fc
DF
8326 /*
8327 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8328 * Here we create an empty update on each plane.
8329 * To fix this, DC should permit updating only stream properties.
8330 */
8331 for (j = 0; j < status->plane_count; j++)
8332 dummy_updates[j].surface = status->plane_states[0];
8333
8334
8335 mutex_lock(&dm->dc_lock);
8336 dc_commit_updates_for_stream(dm->dc,
8337 dummy_updates,
8338 status->plane_count,
8339 dm_new_crtc_state->stream,
8340 &stream_update,
8341 dc_state);
8342 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
8343 }
8344
b5e83f6f 8345 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 8346 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 8347 new_crtc_state, i) {
fe2a1965
LP
8348 if (old_crtc_state->active && !new_crtc_state->active)
8349 crtc_disable_count++;
8350
54d76575 8351 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 8352 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 8353
585d450c
AP
8354 /* For freesync config update on crtc state and params for irq */
8355 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 8356
66b0c973
MK
8357 /* Handle vrr on->off / off->on transitions */
8358 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8359 dm_new_crtc_state);
e7b07cee
HW
8360 }
8361
8fe684e9
NK
8362 /*
8363 * Enable interrupts for CRTCs that are newly enabled or went through
8364 * a modeset. This is intentionally deferred until after the front end
8365 * state has been modified, so that the OTG is on and the IRQ
8366 * handlers don't access stale or invalid state.
8367 */
8368 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8369 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
c920888c 8370 bool configure_crc = false;
8fe684e9 8371
585d450c
AP
8372 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8373
8fe684e9
NK
8374 if (new_crtc_state->active &&
8375 (!old_crtc_state->active ||
8376 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
8377 dc_stream_retain(dm_new_crtc_state->stream);
8378 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 8379 manage_dm_interrupts(adev, acrtc, true);
c920888c 8380 }
8fe684e9 8381#ifdef CONFIG_DEBUG_FS
c920888c
WL
8382 if (new_crtc_state->active &&
8383 amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8fe684e9
NK
8384 /*
8385 * Frontend may have changed so reapply the CRC capture
8386 * settings for the stream.
8387 */
8388 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 8389 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8fe684e9 8390
c920888c
WL
8391 if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
8392 if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
8393 configure_crc = true;
8394 } else {
8395 if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
8396 configure_crc = true;
8fe684e9 8397 }
c920888c
WL
8398
8399 if (configure_crc)
8400 amdgpu_dm_crtc_configure_crc_source(
8401 crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
8fe684e9 8402 }
c920888c 8403#endif
8fe684e9 8404 }
e7b07cee 8405
420cd472 8406 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 8407 if (new_crtc_state->async_flip)
420cd472
DF
8408 wait_for_vblank = false;
8409
e7b07cee 8410 /* update planes when needed per crtc*/
5cc6dcbd 8411 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 8412 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8413
54d76575 8414 if (dm_new_crtc_state->stream)
eb3dc897 8415 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 8416 dm, crtc, wait_for_vblank);
e7b07cee
HW
8417 }
8418
6ce8f316
NK
8419 /* Update audio instances for each connector. */
8420 amdgpu_dm_commit_audio(dev, state);
8421
e7b07cee
HW
8422 /*
8423 * send vblank event on all events not handled in flip and
8424 * mark consumed event for drm_atomic_helper_commit_hw_done
8425 */
4a580877 8426 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 8427 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8428
0bc9706d
LSL
8429 if (new_crtc_state->event)
8430 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 8431
0bc9706d 8432 new_crtc_state->event = NULL;
e7b07cee 8433 }
4a580877 8434 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 8435
29c8f234
LL
8436 /* Signal HW programming completion */
8437 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
8438
8439 if (wait_for_vblank)
320a1274 8440 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
8441
8442 drm_atomic_helper_cleanup_planes(dev, state);
97028037 8443
5f6fab24
AD
8444 /* return the stolen vga memory back to VRAM */
8445 if (!adev->mman.keep_stolen_vga_memory)
8446 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8447 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8448
1f6010a9
DF
8449 /*
8450 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
8451 * so we can put the GPU into runtime suspend if we're not driving any
8452 * displays anymore
8453 */
fe2a1965
LP
8454 for (i = 0; i < crtc_disable_count; i++)
8455 pm_runtime_put_autosuspend(dev->dev);
97028037 8456 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
8457
8458 if (dc_state_temp)
8459 dc_release_state(dc_state_temp);
e7b07cee
HW
8460}
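
/*
 * Editor's note: a minimal sketch (struct name assumed, not taken from this
 * file) of how a commit-tail implementation like the one above is typically
 * wired into the DRM core via drm_mode_config_helper_funcs, after which
 * drm_atomic_helper_commit() calls back into it for the HW programming phase:
 */
static const struct drm_mode_config_helper_funcs example_mode_config_helpers = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
};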
8461
8462
8463static int dm_force_atomic_commit(struct drm_connector *connector)
8464{
8465 int ret = 0;
8466 struct drm_device *ddev = connector->dev;
8467 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8468 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8469 struct drm_plane *plane = disconnected_acrtc->base.primary;
8470 struct drm_connector_state *conn_state;
8471 struct drm_crtc_state *crtc_state;
8472 struct drm_plane_state *plane_state;
8473
8474 if (!state)
8475 return -ENOMEM;
8476
8477 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8478
8479 /* Construct an atomic state to restore previous display setting */
8480
8481 /*
8482 * Attach connectors to drm_atomic_state
8483 */
8484 conn_state = drm_atomic_get_connector_state(state, connector);
8485
8486 ret = PTR_ERR_OR_ZERO(conn_state);
8487 if (ret)
8488 goto err;
8489
8490 /* Attach crtc to drm_atomic_state*/
8491 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8492
8493 ret = PTR_ERR_OR_ZERO(crtc_state);
8494 if (ret)
8495 goto err;
8496
8497 /* force a restore */
8498 crtc_state->mode_changed = true;
8499
8500 /* Attach plane to drm_atomic_state */
8501 plane_state = drm_atomic_get_plane_state(state, plane);
8502
8503 ret = PTR_ERR_OR_ZERO(plane_state);
8504 if (ret)
8505 goto err;
8506
8507
8508 /* Call commit internally with the state we just constructed */
8509 ret = drm_atomic_commit(state);
8510 if (!ret)
8511 return 0;
8512
8513err:
8514 DRM_ERROR("Restoring old state failed with %i\n", ret);
8515 drm_atomic_state_put(state);
8516
8517 return ret;
8518}
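
/*
 * Editor's note: the function above follows the usual DRM pattern for a
 * driver-internal commit: allocate a drm_atomic_state, pull the connector,
 * CRTC and primary-plane states into it, force mode_changed to ensure a full
 * modeset, and hand the constructed state to drm_atomic_commit().
 */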
8519
8520/*
1f6010a9
DF
8521 * This function handles all cases when a set mode does not come upon hotplug.
8522 * This includes when a display is unplugged then plugged back into the
8523 * same port and when running without usermode desktop manager support
e7b07cee 8524 */
3ee6b26b
AD
8525void dm_restore_drm_connector_state(struct drm_device *dev,
8526 struct drm_connector *connector)
e7b07cee 8527{
c84dec2f 8528 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
8529 struct amdgpu_crtc *disconnected_acrtc;
8530 struct dm_crtc_state *acrtc_state;
8531
8532 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8533 return;
8534
8535 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
8536 if (!disconnected_acrtc)
8537 return;
e7b07cee 8538
70e8ffc5
HW
8539 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8540 if (!acrtc_state->stream)
e7b07cee
HW
8541 return;
8542
8543 /*
8544 * If the previous sink is not released and different from the current,
8545 * we deduce we are in a state where we can not rely on usermode call
8546 * to turn on the display, so we do it here
8547 */
8548 if (acrtc_state->stream->sink != aconnector->dc_sink)
8549 dm_force_atomic_commit(&aconnector->base);
8550}
8551
1f6010a9 8552/*
e7b07cee
HW
8553 * Grabs all modesetting locks to serialize against any blocking commits,
8554 * and waits for completion of all non-blocking commits.
8555 */
3ee6b26b
AD
8556static int do_aquire_global_lock(struct drm_device *dev,
8557 struct drm_atomic_state *state)
e7b07cee
HW
8558{
8559 struct drm_crtc *crtc;
8560 struct drm_crtc_commit *commit;
8561 long ret;
8562
1f6010a9
DF
8563 /*
8564 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
8565 * ensure that when the framework releases it, the
8566 * extra locks we are locking here will also get released.
8567 */
8568 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8569 if (ret)
8570 return ret;
8571
8572 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8573 spin_lock(&crtc->commit_lock);
8574 commit = list_first_entry_or_null(&crtc->commit_list,
8575 struct drm_crtc_commit, commit_entry);
8576 if (commit)
8577 drm_crtc_commit_get(commit);
8578 spin_unlock(&crtc->commit_lock);
8579
8580 if (!commit)
8581 continue;
8582
1f6010a9
DF
8583 /*
8584 * Make sure all pending HW programming completed and
e7b07cee
HW
8585 * page flips done
8586 */
8587 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8588
8589 if (ret > 0)
8590 ret = wait_for_completion_interruptible_timeout(
8591 &commit->flip_done, 10*HZ);
8592
8593 if (ret == 0)
8594 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 8595 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
8596
8597 drm_crtc_commit_put(commit);
8598 }
8599
8600 return ret < 0 ? ret : 0;
8601}
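
/*
 * Editor's note: the 10*HZ arguments above are in jiffies, so each
 * hw_done/flip_done wait is bounded at 10 seconds before the CRTC is
 * reported as timed out.
 */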
8602
bb47de73
NK
8603static void get_freesync_config_for_crtc(
8604 struct dm_crtc_state *new_crtc_state,
8605 struct dm_connector_state *new_con_state)
98e6436d
AK
8606{
8607 struct mod_freesync_config config = {0};
98e6436d
AK
8608 struct amdgpu_dm_connector *aconnector =
8609 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 8610 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 8611 int vrefresh = drm_mode_vrefresh(mode);
98e6436d 8612
a057ec46 8613 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
8614 vrefresh >= aconnector->min_vfreq &&
8615 vrefresh <= aconnector->max_vfreq;
bb47de73 8616
a057ec46
IB
8617 if (new_crtc_state->vrr_supported) {
8618 new_crtc_state->stream->ignore_msa_timing_param = true;
bb47de73 8619 config.state = new_crtc_state->base.vrr_enabled ?
98e6436d
AK
8620 VRR_STATE_ACTIVE_VARIABLE :
8621 VRR_STATE_INACTIVE;
8622 config.min_refresh_in_uhz =
8623 aconnector->min_vfreq * 1000000;
8624 config.max_refresh_in_uhz =
8625 aconnector->max_vfreq * 1000000;
69ff8845 8626 config.vsif_supported = true;
180db303 8627 config.btr = true;
98e6436d
AK
8628 }
8629
bb47de73
NK
8630 new_crtc_state->freesync_config = config;
8631}
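
/*
 * Editor's note: a worked example of the conversion above, assuming a
 * hypothetical 48-144 Hz FreeSync panel. The freesync module takes refresh
 * rates in micro-hertz:
 *
 *	config.min_refresh_in_uhz = 48 * 1000000;	//  48,000,000 uhz
 *	config.max_refresh_in_uhz = 144 * 1000000;	// 144,000,000 uhz
 *
 * With config.btr set, below-the-range content (under 48 Hz here) can be
 * handled by repeating frames instead of leaving the VRR window.
 */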
98e6436d 8632
bb47de73
NK
8633static void reset_freesync_config_for_crtc(
8634 struct dm_crtc_state *new_crtc_state)
8635{
8636 new_crtc_state->vrr_supported = false;
98e6436d 8637
bb47de73
NK
8638 memset(&new_crtc_state->vrr_infopacket, 0,
8639 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
8640}
8641
4b9674e5
LL
8642static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8643 struct drm_atomic_state *state,
8644 struct drm_crtc *crtc,
8645 struct drm_crtc_state *old_crtc_state,
8646 struct drm_crtc_state *new_crtc_state,
8647 bool enable,
8648 bool *lock_and_validation_needed)
e7b07cee 8649{
eb3dc897 8650 struct dm_atomic_state *dm_state = NULL;
54d76575 8651 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 8652 struct dc_stream_state *new_stream;
62f55537 8653 int ret = 0;
d4d4a645 8654
1f6010a9
DF
8655 /*
8656 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8657 * update changed items
8658 */
4b9674e5
LL
8659 struct amdgpu_crtc *acrtc = NULL;
8660 struct amdgpu_dm_connector *aconnector = NULL;
8661 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8662 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 8663
4b9674e5 8664 new_stream = NULL;
9635b754 8665
4b9674e5
LL
8666 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8667 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8668 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 8669 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 8670
4b9674e5
LL
8671 /* TODO This hack should go away */
8672 if (aconnector && enable) {
8673 /* Make sure fake sink is created in plug-in scenario */
8674 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8675 &aconnector->base);
8676 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8677 &aconnector->base);
19f89e23 8678
4b9674e5
LL
8679 if (IS_ERR(drm_new_conn_state)) {
8680 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8681 goto fail;
8682 }
19f89e23 8683
4b9674e5
LL
8684 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8685 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 8686
02d35a67
JFZ
8687 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8688 goto skip_modeset;
8689
cbd14ae7
SW
8690 new_stream = create_validate_stream_for_sink(aconnector,
8691 &new_crtc_state->mode,
8692 dm_new_conn_state,
8693 dm_old_crtc_state->stream);
19f89e23 8694
4b9674e5
LL
8695 /*
8696 * we can have no stream on ACTION_SET if a display
8697 * was disconnected during S3, in this case it is not an
8698 * error, the OS will be updated after detection, and
8699 * will do the right thing on next atomic commit
8700 */
19f89e23 8701
4b9674e5
LL
8702 if (!new_stream) {
8703 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8704 __func__, acrtc->base.base.id);
8705 ret = -ENOMEM;
8706 goto fail;
8707 }
e7b07cee 8708
3d4e52d0
VL
8709 /*
8710 * TODO: Check VSDB bits to decide whether this should
8711 * be enabled or not.
8712 */
8713 new_stream->triggered_crtc_reset.enabled =
8714 dm->force_timing_sync;
8715
4b9674e5 8716 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 8717
88694af9
NK
8718 ret = fill_hdr_info_packet(drm_new_conn_state,
8719 &new_stream->hdr_static_metadata);
8720 if (ret)
8721 goto fail;
8722
7e930949
NK
8723 /*
8724 * If we already removed the old stream from the context
8725 * (and set the new stream to NULL) then we can't reuse
8726 * the old stream even if the stream and scaling are unchanged.
8727 * We'll hit the BUG_ON and black screen.
8728 *
8729 * TODO: Refactor this function to allow this check to work
8730 * in all conditions.
8731 */
8732 if (dm_new_crtc_state->stream &&
8733 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
8734 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8735 new_crtc_state->mode_changed = false;
8736 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8737 new_crtc_state->mode_changed);
62f55537 8738 }
4b9674e5 8739 }
b830ebc9 8740
02d35a67 8741 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
8742 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8743 goto skip_modeset;
e7b07cee 8744
4b9674e5
LL
8745 DRM_DEBUG_DRIVER(
8746 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8747 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8748 "connectors_changed:%d\n",
8749 acrtc->crtc_id,
8750 new_crtc_state->enable,
8751 new_crtc_state->active,
8752 new_crtc_state->planes_changed,
8753 new_crtc_state->mode_changed,
8754 new_crtc_state->active_changed,
8755 new_crtc_state->connectors_changed);
62f55537 8756
4b9674e5
LL
8757 /* Remove stream for any changed/disabled CRTC */
8758 if (!enable) {
62f55537 8759
4b9674e5
LL
8760 if (!dm_old_crtc_state->stream)
8761 goto skip_modeset;
eb3dc897 8762
4b9674e5
LL
8763 ret = dm_atomic_get_state(state, &dm_state);
8764 if (ret)
8765 goto fail;
e7b07cee 8766
4b9674e5
LL
8767 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8768 crtc->base.id);
62f55537 8769
4b9674e5
LL
8770 /* i.e. reset mode */
8771 if (dc_remove_stream_from_ctx(
8772 dm->dc,
8773 dm_state->context,
8774 dm_old_crtc_state->stream) != DC_OK) {
8775 ret = -EINVAL;
8776 goto fail;
8777 }
62f55537 8778
4b9674e5
LL
8779 dc_stream_release(dm_old_crtc_state->stream);
8780 dm_new_crtc_state->stream = NULL;
bb47de73 8781
4b9674e5 8782 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 8783
4b9674e5 8784 *lock_and_validation_needed = true;
62f55537 8785
4b9674e5
LL
8786 } else {/* Add stream for any updated/enabled CRTC */
8787 /*
8788 * Quick fix to prevent a NULL pointer dereference on new_stream when newly
8789 * added MST connectors are not found in the existing crtc_state in chained mode
8790 * TODO: need to dig out the root cause of this
8791 */
8792 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8793 goto skip_modeset;
62f55537 8794
4b9674e5
LL
8795 if (modereset_required(new_crtc_state))
8796 goto skip_modeset;
62f55537 8797
4b9674e5
LL
8798 if (modeset_required(new_crtc_state, new_stream,
8799 dm_old_crtc_state->stream)) {
62f55537 8800
4b9674e5 8801 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 8802
4b9674e5
LL
8803 ret = dm_atomic_get_state(state, &dm_state);
8804 if (ret)
8805 goto fail;
27b3f4fc 8806
4b9674e5 8807 dm_new_crtc_state->stream = new_stream;
62f55537 8808
4b9674e5 8809 dc_stream_retain(new_stream);
1dc90497 8810
4b9674e5
LL
8811 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8812 crtc->base.id);
1dc90497 8813
4b9674e5
LL
8814 if (dc_add_stream_to_ctx(
8815 dm->dc,
8816 dm_state->context,
8817 dm_new_crtc_state->stream) != DC_OK) {
8818 ret = -EINVAL;
8819 goto fail;
9b690ef3
BL
8820 }
8821
4b9674e5
LL
8822 *lock_and_validation_needed = true;
8823 }
8824 }
e277adc5 8825
4b9674e5
LL
8826skip_modeset:
8827 /* Release extra reference */
8828 if (new_stream)
8829 dc_stream_release(new_stream);
e277adc5 8830
4b9674e5
LL
8831 /*
8832 * We want to do dc stream updates that do not require a
8833 * full modeset below.
8834 */
2afda735 8835 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
8836 return 0;
8837 /*
8838 * Given above conditions, the dc state cannot be NULL because:
8839 * 1. We're in the process of enabling CRTCs (the stream has just
8840 * been added to the dc context, or is already in the context)
8841 * 2. Has a valid connector attached, and
8842 * 3. Is currently active and enabled.
8843 * => The dc stream state currently exists.
8844 */
8845 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 8846
4b9674e5
LL
8847 /* Scaling or underscan settings */
8848 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8849 update_stream_scaling_settings(
8850 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 8851
b05e2c5e
DF
8852 /* ABM settings */
8853 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8854
4b9674e5
LL
8855 /*
8856 * Color management settings. We also update color properties
8857 * when a modeset is needed, to ensure it gets reprogrammed.
8858 */
8859 if (dm_new_crtc_state->base.color_mgmt_changed ||
8860 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 8861 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
8862 if (ret)
8863 goto fail;
62f55537 8864 }
e7b07cee 8865
4b9674e5
LL
8866 /* Update Freesync settings. */
8867 get_freesync_config_for_crtc(dm_new_crtc_state,
8868 dm_new_conn_state);
8869
62f55537 8870 return ret;
9635b754
DS
8871
8872fail:
8873 if (new_stream)
8874 dc_stream_release(new_stream);
8875 return ret;
62f55537 8876}
9b690ef3 8877
f6ff2a08
NK
8878static bool should_reset_plane(struct drm_atomic_state *state,
8879 struct drm_plane *plane,
8880 struct drm_plane_state *old_plane_state,
8881 struct drm_plane_state *new_plane_state)
8882{
8883 struct drm_plane *other;
8884 struct drm_plane_state *old_other_state, *new_other_state;
8885 struct drm_crtc_state *new_crtc_state;
8886 int i;
8887
70a1efac
NK
8888 /*
8889 * TODO: Remove this hack once the checks below are sufficient
8890 * to determine when we need to reset all the planes on
8891 * the stream.
8892 */
8893 if (state->allow_modeset)
8894 return true;
8895
f6ff2a08
NK
8896 /* Exit early if we know that we're adding or removing the plane. */
8897 if (old_plane_state->crtc != new_plane_state->crtc)
8898 return true;
8899
8900 /* old crtc == new_crtc == NULL, plane not in context. */
8901 if (!new_plane_state->crtc)
8902 return false;
8903
8904 new_crtc_state =
8905 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8906
8907 if (!new_crtc_state)
8908 return true;
8909
7316c4ad
NK
8910 /* CRTC Degamma changes currently require us to recreate planes. */
8911 if (new_crtc_state->color_mgmt_changed)
8912 return true;
8913
f6ff2a08
NK
8914 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8915 return true;
8916
8917 /*
8918 * If there are any new primary or overlay planes being added or
8919 * removed then the z-order can potentially change. To ensure
8920 * correct z-order and pipe acquisition the current DC architecture
8921 * requires us to remove and recreate all existing planes.
8922 *
8923 * TODO: Come up with a more elegant solution for this.
8924 */
8925 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 8926 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
8927 if (other->type == DRM_PLANE_TYPE_CURSOR)
8928 continue;
8929
8930 if (old_other_state->crtc != new_plane_state->crtc &&
8931 new_other_state->crtc != new_plane_state->crtc)
8932 continue;
8933
8934 if (old_other_state->crtc != new_other_state->crtc)
8935 return true;
8936
dc4cb30d
NK
8937 /* Src/dst size and scaling updates. */
8938 if (old_other_state->src_w != new_other_state->src_w ||
8939 old_other_state->src_h != new_other_state->src_h ||
8940 old_other_state->crtc_w != new_other_state->crtc_w ||
8941 old_other_state->crtc_h != new_other_state->crtc_h)
8942 return true;
8943
8944 /* Rotation / mirroring updates. */
8945 if (old_other_state->rotation != new_other_state->rotation)
8946 return true;
8947
8948 /* Blending updates. */
8949 if (old_other_state->pixel_blend_mode !=
8950 new_other_state->pixel_blend_mode)
8951 return true;
8952
8953 /* Alpha updates. */
8954 if (old_other_state->alpha != new_other_state->alpha)
8955 return true;
8956
8957 /* Colorspace changes. */
8958 if (old_other_state->color_range != new_other_state->color_range ||
8959 old_other_state->color_encoding != new_other_state->color_encoding)
8960 return true;
8961
9a81cc60
NK
8962 /* Framebuffer checks fall at the end. */
8963 if (!old_other_state->fb || !new_other_state->fb)
8964 continue;
8965
8966 /* Pixel format changes can require bandwidth updates. */
8967 if (old_other_state->fb->format != new_other_state->fb->format)
8968 return true;
8969
6eed95b0
BN
8970 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8971 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
8972
8973 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
8974 if (old_afb->tiling_flags != new_afb->tiling_flags ||
8975 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
8976 return true;
8977 }
8978
8979 return false;
8980}
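
/*
 * Editor's note: the conservative rule above means, for example, that adding
 * or removing one overlay plane forces every plane on that CRTC's stream to
 * be removed and recreated in the DC context, trading efficiency for correct
 * z-order and pipe acquisition.
 */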
8981
b0455fda
SS
8982static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
8983 struct drm_plane_state *new_plane_state,
8984 struct drm_framebuffer *fb)
8985{
e72868c4
SS
8986 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
8987 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 8988 unsigned int pitch;
e72868c4 8989 bool linear;
b0455fda
SS
8990
8991 if (fb->width > new_acrtc->max_cursor_width ||
8992 fb->height > new_acrtc->max_cursor_height) {
8993 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
8994 new_plane_state->fb->width,
8995 new_plane_state->fb->height);
8996 return -EINVAL;
8997 }
8998 if (new_plane_state->src_w != fb->width << 16 ||
8999 new_plane_state->src_h != fb->height << 16) {
9000 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9001 return -EINVAL;
9002 }
9003
9004 /* Pitch in pixels */
9005 pitch = fb->pitches[0] / fb->format->cpp[0];
9006
9007 if (fb->width != pitch) {
9008 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9009 fb->width, pitch);
9010 return -EINVAL;
9011 }
9012
9013 switch (pitch) {
9014 case 64:
9015 case 128:
9016 case 256:
9017 /* FB pitch is supported by cursor plane */
9018 break;
9019 default:
9020 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9021 return -EINVAL;
9022 }
9023
e72868c4
SS
9024 /* Core DRM takes care of checking FB modifiers, so we only need to
9025 * check tiling flags when the FB doesn't have a modifier. */
9026 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9027 if (adev->family < AMDGPU_FAMILY_AI) {
9028 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9029 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9030 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9031 } else {
9032 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9033 }
9034 if (!linear) {
9035 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9036 return -EINVAL;
9037 }
9038 }
9039
b0455fda
SS
9040 return 0;
9041}
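
/*
 * Editor's note: a worked example of the checks above, assuming a
 * hypothetical 64x64 ARGB8888 cursor FB (4 bytes per pixel,
 * pitches[0] == 256):
 *
 *	pitch = 256 / 4 = 64 pixels
 *
 * which equals fb->width and is one of the supported cursor pitches
 * (64/128/256), so the FB is accepted.
 */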
9042
9e869063
LL
9043static int dm_update_plane_state(struct dc *dc,
9044 struct drm_atomic_state *state,
9045 struct drm_plane *plane,
9046 struct drm_plane_state *old_plane_state,
9047 struct drm_plane_state *new_plane_state,
9048 bool enable,
9049 bool *lock_and_validation_needed)
62f55537 9050{
eb3dc897
NK
9051
9052 struct dm_atomic_state *dm_state = NULL;
62f55537 9053 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 9054 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 9055 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 9056 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 9057 struct amdgpu_crtc *new_acrtc;
f6ff2a08 9058 bool needs_reset;
62f55537 9059 int ret = 0;
e7b07cee 9060
9b690ef3 9061
9e869063
LL
9062 new_plane_crtc = new_plane_state->crtc;
9063 old_plane_crtc = old_plane_state->crtc;
9064 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9065 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 9066
626bf90f
SS
9067 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9068 if (!enable || !new_plane_crtc ||
9069 drm_atomic_plane_disabling(plane->state, new_plane_state))
9070 return 0;
9071
9072 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9073
5f581248
SS
9074 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9075 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9076 return -EINVAL;
9077 }
9078
24f99d2b 9079 if (new_plane_state->fb) {
b0455fda
SS
9080 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9081 new_plane_state->fb);
9082 if (ret)
9083 return ret;
24f99d2b
SS
9084 }
9085
9e869063 9086 return 0;
626bf90f 9087 }
9b690ef3 9088
f6ff2a08
NK
9089 needs_reset = should_reset_plane(state, plane, old_plane_state,
9090 new_plane_state);
9091
9e869063
LL
9092 /* Remove any changed/removed planes */
9093 if (!enable) {
f6ff2a08 9094 if (!needs_reset)
9e869063 9095 return 0;
a7b06724 9096
9e869063
LL
9097 if (!old_plane_crtc)
9098 return 0;
62f55537 9099
9e869063
LL
9100 old_crtc_state = drm_atomic_get_old_crtc_state(
9101 state, old_plane_crtc);
9102 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 9103
9e869063
LL
9104 if (!dm_old_crtc_state->stream)
9105 return 0;
62f55537 9106
9e869063
LL
9107 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9108 plane->base.id, old_plane_crtc->base.id);
9b690ef3 9109
9e869063
LL
9110 ret = dm_atomic_get_state(state, &dm_state);
9111 if (ret)
9112 return ret;
eb3dc897 9113
9e869063
LL
9114 if (!dc_remove_plane_from_context(
9115 dc,
9116 dm_old_crtc_state->stream,
9117 dm_old_plane_state->dc_state,
9118 dm_state->context)) {
62f55537 9119
c3537613 9120 return -EINVAL;
9e869063 9121 }
e7b07cee 9122
9b690ef3 9123
9e869063
LL
9124 dc_plane_state_release(dm_old_plane_state->dc_state);
9125 dm_new_plane_state->dc_state = NULL;
1dc90497 9126
9e869063 9127 *lock_and_validation_needed = true;
1dc90497 9128
9e869063
LL
9129 } else { /* Add new planes */
9130 struct dc_plane_state *dc_new_plane_state;
1dc90497 9131
9e869063
LL
9132 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9133 return 0;
e7b07cee 9134
9e869063
LL
9135 if (!new_plane_crtc)
9136 return 0;
e7b07cee 9137
9e869063
LL
9138 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9139 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 9140
9e869063
LL
9141 if (!dm_new_crtc_state->stream)
9142 return 0;
62f55537 9143
f6ff2a08 9144 if (!needs_reset)
9e869063 9145 return 0;
62f55537 9146
8c44515b
AP
9147 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9148 if (ret)
9149 return ret;
9150
9e869063 9151 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 9152
9e869063
LL
9153 dc_new_plane_state = dc_create_plane_state(dc);
9154 if (!dc_new_plane_state)
9155 return -ENOMEM;
62f55537 9156
9e869063
LL
9157 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9158 plane->base.id, new_plane_crtc->base.id);
8c45c5db 9159
695af5f9 9160 ret = fill_dc_plane_attributes(
1348969a 9161 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
9162 dc_new_plane_state,
9163 new_plane_state,
9164 new_crtc_state);
9165 if (ret) {
9166 dc_plane_state_release(dc_new_plane_state);
9167 return ret;
9168 }
62f55537 9169
9e869063
LL
9170 ret = dm_atomic_get_state(state, &dm_state);
9171 if (ret) {
9172 dc_plane_state_release(dc_new_plane_state);
9173 return ret;
9174 }
eb3dc897 9175
9e869063
LL
9176 /*
9177 * Any atomic check errors that occur after this will
9178 * not need a release. The plane state will be attached
9179 * to the stream, and therefore part of the atomic
9180 * state. It'll be released when the atomic state is
9181 * cleaned.
9182 */
9183 if (!dc_add_plane_to_context(
9184 dc,
9185 dm_new_crtc_state->stream,
9186 dc_new_plane_state,
9187 dm_state->context)) {
62f55537 9188
9e869063
LL
9189 dc_plane_state_release(dc_new_plane_state);
9190 return -EINVAL;
9191 }
8c45c5db 9192
9e869063 9193 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 9194
9e869063
LL
9195 /* Tell DC to do a full surface update every time there
9196 * is a plane change. Inefficient, but works for now.
9197 */
9198 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9199
9200 *lock_and_validation_needed = true;
62f55537 9201 }
e7b07cee
HW
9202
9203
62f55537
AG
9204 return ret;
9205}
a87fa993 9206
12f4849a
SS
9207static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9208 struct drm_crtc *crtc,
9209 struct drm_crtc_state *new_crtc_state)
9210{
9211 struct drm_plane_state *new_cursor_state, *new_primary_state;
9212 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9213
9214 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9215 * cursor per pipe but it's going to inherit the scaling and
9216 * positioning from the underlying pipe. Check that the cursor plane's
9217 * scaling matches the primary plane's. */
9218
9219 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9220 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9221 if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9222 return 0;
9223 }
9224
9225 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9226 (new_cursor_state->src_w >> 16);
9227 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9228 (new_cursor_state->src_h >> 16);
9229
9230 primary_scale_w = new_primary_state->crtc_w * 1000 /
9231 (new_primary_state->src_w >> 16);
9232 primary_scale_h = new_primary_state->crtc_h * 1000 /
9233 (new_primary_state->src_h >> 16);
9234
9235 if (cursor_scale_w != primary_scale_w ||
9236 cursor_scale_h != primary_scale_h) {
9237 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9238 return -EINVAL;
9239 }
9240
9241 return 0;
9242}
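
/*
 * Editor's note: a worked example of the fixed-point math above. src_w/src_h
 * are 16.16 fixed point, so a 64x64 cursor shown at 64x64 gives
 *
 *	cursor_scale_w = 64 * 1000 / (64 << 16 >> 16) = 1000	(1.0x)
 *
 * If the primary plane is scanned out with 2x scaling (say src_w 1920,
 * crtc_w 3840), primary_scale_w = 2000 and the check above rejects the
 * commit with -EINVAL, since the shared cursor pipe would inherit that
 * 2x scaling.
 */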
9243
e10517b3 9244#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
9245static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9246{
9247 struct drm_connector *connector;
9248 struct drm_connector_state *conn_state;
9249 struct amdgpu_dm_connector *aconnector = NULL;
9250 int i;
9251 for_each_new_connector_in_state(state, connector, conn_state, i) {
9252 if (conn_state->crtc != crtc)
9253 continue;
9254
9255 aconnector = to_amdgpu_dm_connector(connector);
9256 if (!aconnector->port || !aconnector->mst_port)
9257 aconnector = NULL;
9258 else
9259 break;
9260 }
9261
9262 if (!aconnector)
9263 return 0;
9264
9265 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9266}
e10517b3 9267#endif
44be939f 9268
b8592b48
LL
9269/**
9270 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9271 * @dev: The DRM device
9272 * @state: The atomic state to commit
9273 *
9274 * Validate that the given atomic state is programmable by DC into hardware.
9275 * This involves constructing a &struct dc_state reflecting the new hardware
9276 * state we wish to commit, then querying DC to see if it is programmable. It's
9277 * important not to modify the existing DC state. Otherwise, atomic_check
9278 * may unexpectedly commit hardware changes.
9279 *
9280 * When validating the DC state, it's important that the right locks are
9281 * acquired. For the full-update case, which removes/adds/updates streams on
9282 * one CRTC while flipping on another CRTC, acquiring the global lock will
9283 * guarantee that any such full update commit will wait for completion of any
f6d7c7fa 9284 * outstanding flip using DRM's synchronization events.
b8592b48
LL
9285 *
9286 * Note that DM adds the affected connectors for all CRTCs in state, when that
9287 * might not seem necessary. This is because DC stream creation requires the
9288 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9289 * be possible but non-trivial - a possible TODO item.
9290 *
9291 * Return: 0 on success, or a negative error code if validation failed.
9292 */
7578ecda
AD
9293static int amdgpu_dm_atomic_check(struct drm_device *dev,
9294 struct drm_atomic_state *state)
62f55537 9295{
1348969a 9296 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 9297 struct dm_atomic_state *dm_state = NULL;
62f55537 9298 struct dc *dc = adev->dm.dc;
62f55537 9299 struct drm_connector *connector;
c2cea706 9300 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 9301 struct drm_crtc *crtc;
fc9e9920 9302 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
9303 struct drm_plane *plane;
9304 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 9305 enum dc_status status;
1e88ad0a 9306 int ret, i;
62f55537 9307 bool lock_and_validation_needed = false;
886876ec 9308 struct dm_crtc_state *dm_old_crtc_state;
62f55537 9309
e8a98235 9310 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 9311
62f55537 9312 ret = drm_atomic_helper_check_modeset(dev, state);
01e28f9c
MD
9313 if (ret)
9314 goto fail;
62f55537 9315
c5892a10
SW
9316 /* Check connector changes */
9317 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9318 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9319 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9320
9321 /* Skip connectors that are disabled or part of modeset already. */
9322 if (!old_con_state->crtc && !new_con_state->crtc)
9323 continue;
9324
9325 if (!new_con_state->crtc)
9326 continue;
9327
9328 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9329 if (IS_ERR(new_crtc_state)) {
9330 ret = PTR_ERR(new_crtc_state);
9331 goto fail;
9332 }
9333
9334 if (dm_old_con_state->abm_level !=
9335 dm_new_con_state->abm_level)
9336 new_crtc_state->connectors_changed = true;
9337 }
9338
e10517b3 9339#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
9340 if (adev->asic_type >= CHIP_NAVI10) {
9341 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9342 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9343 ret = add_affected_mst_dsc_crtcs(state, crtc);
9344 if (ret)
9345 goto fail;
9346 }
9347 }
9348 }
e10517b3 9349#endif
1e88ad0a 9350 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
9351 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9352
1e88ad0a 9353 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 9354 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
9355 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9356 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 9357 continue;
7bef1af3 9358
1e88ad0a
S
9359 if (!new_crtc_state->enable)
9360 continue;
fc9e9920 9361
1e88ad0a
S
9362 ret = drm_atomic_add_affected_connectors(state, crtc);
9363 if (ret)
9364 goto fail;
fc9e9920 9365
1e88ad0a
S
9366 ret = drm_atomic_add_affected_planes(state, crtc);
9367 if (ret)
9368 goto fail;
115a385c 9369
cbac53f7 9370 if (dm_old_crtc_state->dsc_force_changed)
115a385c 9371 new_crtc_state->mode_changed = true;
e7b07cee
HW
9372 }
9373
2d9e6431
NK
9374 /*
9375 * Add all primary and overlay planes on the CRTC to the state
9376 * whenever a plane is enabled to maintain correct z-ordering
9377 * and to enable fast surface updates.
9378 */
9379 drm_for_each_crtc(crtc, dev) {
9380 bool modified = false;
9381
9382 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9383 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9384 continue;
9385
9386 if (new_plane_state->crtc == crtc ||
9387 old_plane_state->crtc == crtc) {
9388 modified = true;
9389 break;
9390 }
9391 }
9392
9393 if (!modified)
9394 continue;
9395
9396 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9397 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9398 continue;
9399
9400 new_plane_state =
9401 drm_atomic_get_plane_state(state, plane);
9402
9403 if (IS_ERR(new_plane_state)) {
9404 ret = PTR_ERR(new_plane_state);
9405 goto fail;
9406 }
9407 }
9408 }
9409
62f55537 9410 /* Remove exiting planes if they are modified */
9e869063
LL
9411 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9412 ret = dm_update_plane_state(dc, state, plane,
9413 old_plane_state,
9414 new_plane_state,
9415 false,
9416 &lock_and_validation_needed);
9417 if (ret)
9418 goto fail;
62f55537
AG
9419 }
9420
9421 /* Disable all crtcs which require disable */
4b9674e5
LL
9422 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9423 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9424 old_crtc_state,
9425 new_crtc_state,
9426 false,
9427 &lock_and_validation_needed);
9428 if (ret)
9429 goto fail;
62f55537
AG
9430 }
9431
9432 /* Enable all crtcs which require enable */
4b9674e5
LL
9433 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9434 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9435 old_crtc_state,
9436 new_crtc_state,
9437 true,
9438 &lock_and_validation_needed);
9439 if (ret)
9440 goto fail;
62f55537
AG
9441 }
9442
9443 /* Add new/modified planes */
9e869063
LL
9444 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9445 ret = dm_update_plane_state(dc, state, plane,
9446 old_plane_state,
9447 new_plane_state,
9448 true,
9449 &lock_and_validation_needed);
9450 if (ret)
9451 goto fail;
62f55537
AG
9452 }
9453
b349f76e
ES
9454 /* Run this here since we want to validate the streams we created */
9455 ret = drm_atomic_helper_check_planes(dev, state);
9456 if (ret)
9457 goto fail;
62f55537 9458
12f4849a
SS
9459 /* Check cursor planes scaling */
9460 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9461 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9462 if (ret)
9463 goto fail;
9464 }
9465
43d10d30
NK
9466 if (state->legacy_cursor_update) {
9467 /*
9468 * This is a fast cursor update coming from the plane update
9469 * helper, check if it can be done asynchronously for better
9470 * performance.
9471 */
9472 state->async_update =
9473 !drm_atomic_helper_async_check(dev, state);
9474
9475 /*
9476 * Skip the remaining global validation if this is an async
9477 * update. Cursor updates can be done without affecting
9478 * state or bandwidth calcs and this avoids the performance
9479 * penalty of locking the private state object and
9480 * allocating a new dc_state.
9481 */
9482 if (state->async_update)
9483 return 0;
9484 }
9485
ebdd27e1 9486 /* Check scaling and underscan changes */
1f6010a9 9487 /* TODO Removed scaling changes validation due to inability to commit
e7b07cee
HW
9488 * new stream into context w/o causing a full reset. Need to
9489 * decide how to handle.
9490 */
c2cea706 9491 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
9492 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9493 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9494 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
9495
9496 /* Skip any modesets/resets */
0bc9706d
LSL
9497 if (!acrtc || drm_atomic_crtc_needs_modeset(
9498 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
9499 continue;
9500
b830ebc9 9501 /* Skip anything that isn't a scaling or underscan change */
54d76575 9502 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
9503 continue;
9504
9505 lock_and_validation_needed = true;
9506 }
9507
f6d7c7fa
NK
9508 /*
9509 * Streams and planes are reset when there are changes that affect
9510 * bandwidth. Anything that affects bandwidth needs to go through
9511 * DC global validation to ensure that the configuration can be applied
9512 * to hardware.
9513 *
9514 * We have to currently stall out here in atomic_check for outstanding
9515 * commits to finish in this case because our IRQ handlers reference
9516 * DRM state directly - we can end up disabling interrupts too early
9517 * if we don't.
9518 *
9519 * TODO: Remove this stall and drop DM state private objects.
a87fa993 9520 */
f6d7c7fa 9521 if (lock_and_validation_needed) {
eb3dc897
NK
9522 ret = dm_atomic_get_state(state, &dm_state);
9523 if (ret)
9524 goto fail;
e7b07cee
HW
9525
9526 ret = do_aquire_global_lock(dev, state);
9527 if (ret)
9528 goto fail;
1dc90497 9529
d9fe1a4c 9530#if defined(CONFIG_DRM_AMD_DC_DCN)
8c20a1ed
DF
9531 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9532 goto fail;
9533
29b9ba74
ML
9534 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9535 if (ret)
9536 goto fail;
d9fe1a4c 9537#endif
29b9ba74 9538
ded58c7b
ZL
9539 /*
9540 * Perform validation of MST topology in the state:
9541 * We need to perform MST atomic check before calling
9542 * dc_validate_global_state(), or there is a chance
9543 * to get stuck in an infinite loop and hang eventually.
9544 */
9545 ret = drm_dp_mst_atomic_check(state);
9546 if (ret)
9547 goto fail;
74a16675
RS
9548 status = dc_validate_global_state(dc, dm_state->context, false);
9549 if (status != DC_OK) {
9550 DC_LOG_WARNING("DC global validation failure: %s (%d)",
9551 dc_status_to_str(status), status);
e7b07cee
HW
9552 ret = -EINVAL;
9553 goto fail;
9554 }
bd200d19 9555 } else {
674e78ac 9556 /*
bd200d19
NK
9557 * The commit is a fast update. Fast updates shouldn't change
9558 * the DC context, affect global validation, and can have their
9559 * commit work done in parallel with other commits not touching
9560 * the same resource. If we have a new DC context as part of
9561 * the DM atomic state from validation we need to free it and
9562 * retain the existing one instead.
fde9f39a
MR
9563 *
9564 * Furthermore, since the DM atomic state only contains the DC
9565 * context and can safely be annulled, we can free the state
9566 * and clear the associated private object now to free
9567 * some memory and avoid a possible use-after-free later.
674e78ac 9568 */
bd200d19 9569
fde9f39a
MR
9570 for (i = 0; i < state->num_private_objs; i++) {
9571 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 9572
fde9f39a
MR
9573 if (obj->funcs == adev->dm.atomic_obj.funcs) {
9574 int j = state->num_private_objs-1;
bd200d19 9575
fde9f39a
MR
9576 dm_atomic_destroy_state(obj,
9577 state->private_objs[i].state);
9578
9579 /* If i is not at the end of the array then the
9580 * last element needs to be moved to where i was
9581 * before the array can safely be truncated.
9582 */
9583 if (i != j)
9584 state->private_objs[i] =
9585 state->private_objs[j];
bd200d19 9586
fde9f39a
MR
9587 state->private_objs[j].ptr = NULL;
9588 state->private_objs[j].state = NULL;
9589 state->private_objs[j].old_state = NULL;
9590 state->private_objs[j].new_state = NULL;
9591
9592 state->num_private_objs = j;
9593 break;
9594 }
bd200d19 9595 }
e7b07cee
HW
9596 }
9597
caff0e66
NK
9598 /* Store the overall update type for use later in atomic check. */
9599 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9600 struct dm_crtc_state *dm_new_crtc_state =
9601 to_dm_crtc_state(new_crtc_state);
9602
f6d7c7fa
NK
9603 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9604 UPDATE_TYPE_FULL :
9605 UPDATE_TYPE_FAST;
e7b07cee
HW
9606 }
9607
9608 /* Must be success */
9609 WARN_ON(ret);
e8a98235
RS
9610
9611 trace_amdgpu_dm_atomic_check_finish(state, ret);
9612
e7b07cee
HW
9613 return ret;
9614
9615fail:
9616 if (ret == -EDEADLK)
01e28f9c 9617 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 9618 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 9619 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 9620 else
01e28f9c 9621 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
e7b07cee 9622
e8a98235
RS
9623 trace_amdgpu_dm_atomic_check_finish(state, ret);
9624
e7b07cee
HW
9625 return ret;
9626}
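
/*
 * Editor's note: a minimal sketch (struct name assumed) of how an
 * atomic_check implementation like the one above is exposed to the DRM core,
 * paired with the generic helper commit:
 */
static const struct drm_mode_config_funcs example_mode_funcs = {
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};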
9627
3ee6b26b
AD
9628static bool is_dp_capable_without_timing_msa(struct dc *dc,
9629 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
9630{
9631 uint8_t dpcd_data;
9632 bool capable = false;
9633
c84dec2f 9634 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
9635 dm_helpers_dp_read_dpcd(
9636 NULL,
c84dec2f 9637 amdgpu_dm_connector->dc_link,
e7b07cee
HW
9638 DP_DOWN_STREAM_PORT_COUNT,
9639 &dpcd_data,
9640 sizeof(dpcd_data))) {
9641 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
9642 }
9643
9644 return capable;
9645}
98e6436d
AK
9646void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9647 struct edid *edid)
e7b07cee
HW
9648{
9649 int i;
e7b07cee
HW
9650 bool edid_check_required;
9651 struct detailed_timing *timing;
9652 struct detailed_non_pixel *data;
9653 struct detailed_data_monitor_range *range;
c84dec2f
HW
9654 struct amdgpu_dm_connector *amdgpu_dm_connector =
9655 to_amdgpu_dm_connector(connector);
bb47de73 9656 struct dm_connector_state *dm_con_state = NULL;
e7b07cee
HW
9657
9658 struct drm_device *dev = connector->dev;
1348969a 9659 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 9660 bool freesync_capable = false;
b830ebc9 9661
8218d7f1
HW
9662 if (!connector->state) {
9663 DRM_ERROR("%s - Connector has no state", __func__);
bb47de73 9664 goto update;
8218d7f1
HW
9665 }
9666
98e6436d
AK
9667 if (!edid) {
9668 dm_con_state = to_dm_connector_state(connector->state);
9669
9670 amdgpu_dm_connector->min_vfreq = 0;
9671 amdgpu_dm_connector->max_vfreq = 0;
9672 amdgpu_dm_connector->pixel_clock_mhz = 0;
9673
bb47de73 9674 goto update;
98e6436d
AK
9675 }
9676
8218d7f1
HW
9677 dm_con_state = to_dm_connector_state(connector->state);
9678
e7b07cee 9679 edid_check_required = false;
c84dec2f 9680 if (!amdgpu_dm_connector->dc_sink) {
e7b07cee 9681 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
bb47de73 9682 goto update;
e7b07cee
HW
9683 }
9684 if (!adev->dm.freesync_module)
bb47de73 9685 goto update;
e7b07cee
HW
9686 /*
9687 * if edid is non-zero, restrict freesync only to DP and eDP
9688 */
9689 if (edid) {
c84dec2f
HW
9690 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9691 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
e7b07cee
HW
9692 edid_check_required = is_dp_capable_without_timing_msa(
9693 adev->dm.dc,
c84dec2f 9694 amdgpu_dm_connector);
e7b07cee
HW
9695 }
9696 }
e7b07cee
HW
9697 if (edid_check_required == true && (edid->version > 1 ||
9698 (edid->version == 1 && edid->revision > 1))) {
9699 for (i = 0; i < 4; i++) {
9700
9701 timing = &edid->detailed_timings[i];
9702 data = &timing->data.other_data;
9703 range = &data->data.range;
9704 /*
9705 * Check if monitor has continuous frequency mode
9706 */
9707 if (data->type != EDID_DETAIL_MONITOR_RANGE)
9708 continue;
9709 /*
9710 * Check for the range-limits flag only. If flags == 1, no
9711 * additional timing information is provided.
9712 * Default GTF, GTF secondary curve and CVT are not
9713 * supported.
9714 */
9715 if (range->flags != 1)
9716 continue;
9717
c84dec2f
HW
9718 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9719 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9720 amdgpu_dm_connector->pixel_clock_mhz =
e7b07cee
HW
9721 range->pixel_clock_mhz * 10;
9722 break;
9723 }
9724
c84dec2f 9725 if (amdgpu_dm_connector->max_vfreq -
98e6436d
AK
9726 amdgpu_dm_connector->min_vfreq > 10) {
9727
bb47de73 9728 freesync_capable = true;
e7b07cee
HW
9729 }
9730 }
bb47de73
NK
9731
9732update:
9733 if (dm_con_state)
9734 dm_con_state->freesync_capable = freesync_capable;
9735
9736 if (connector->vrr_capable_property)
9737 drm_connector_set_vrr_capable_property(connector,
9738 freesync_capable);
e7b07cee
HW
9739}
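
For illustration, the loop above scans the four 18-byte detailed-timing descriptors of the EDID base block for a monitor range descriptor (range-limits flag == 1) and treats the sink as FreeSync-capable only when it advertises a usable refresh window. A minimal sketch of the final test, using simplified, hypothetical stand-ins for the drm_edid.h fields (the kernel packs these differently):

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for the monitor range descriptor fields used
 * above; names mirror struct detailed_data_monitor_range. */
struct monitor_range {
	uint8_t min_vfreq;       /* Hz */
	uint8_t max_vfreq;       /* Hz */
	uint8_t pixel_clock_mhz; /* descriptor stores max clock in 10 MHz units */
};

/* A sink advertising a 40-75 Hz continuous range qualifies:
 * 75 - 40 = 35 > 10, so freesync_capable would be set. */
static bool range_is_freesync_capable(const struct monitor_range *r)
{
	return (r->max_vfreq - r->min_vfreq) > 10;
}
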
9740
8c322309
RL
9741static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9742{
9743 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9744
9745 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9746 return;
9747 if (link->type == dc_connection_none)
9748 return;
9749 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9750 dpcd_data, sizeof(dpcd_data))) {
d1ebfdd8
WW
9751 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9752
9753 if (dpcd_data[0] == 0) {
1cfbbdde 9754 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
d1ebfdd8
WW
9755 link->psr_settings.psr_feature_enabled = false;
9756 } else {
1cfbbdde 9757 link->psr_settings.psr_version = DC_PSR_VERSION_1;
d1ebfdd8
WW
9758 link->psr_settings.psr_feature_enabled = true;
9759 }
9760
9761 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8c322309
RL
9762 }
9763}
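
The DPCD byte read above comes from DP_PSR_SUPPORT (address 0x070), where 0 means no PSR and non-zero values encode the sink's PSR revision (1 for PSR1, 2 for PSR2). This path only distinguishes supported from unsupported; a minimal restatement of that mapping, with the DPCD constants as defined in drm_dp_helper.h:

#include <stdint.h>

#define DP_PSR_SUPPORT       0x070 /* DPCD address */
#define DP_PSR_IS_SUPPORTED  1
#define DP_PSR2_IS_SUPPORTED 2

enum psr_version_map { PSR_UNSUPPORTED, PSR_VERSION_1 };

/* Mirrors the collapse above: any non-zero DPCD value is treated as
 * PSR version 1 here; PSR2 capability is not acted on in this path. */
static enum psr_version_map map_psr_caps(uint8_t dpcd_psr_support)
{
	return dpcd_psr_support ? PSR_VERSION_1 : PSR_UNSUPPORTED;
}
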
9764
9765/*
9766 * amdgpu_dm_link_setup_psr() - configure the PSR link
9767 * @stream: stream state
9768 *
9769 * Return: true on success
9770 */
9771static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9772{
9773 struct dc_link *link = NULL;
9774 struct psr_config psr_config = {0};
9775 struct psr_context psr_context = {0};
8c322309
RL
9776 bool ret = false;
9777
9778 if (stream == NULL)
9779 return false;
9780
9781 link = stream->link;
8c322309 9782
d1ebfdd8 9783 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8c322309
RL
9784
9785 if (psr_config.psr_version > 0) {
9786 psr_config.psr_exit_link_training_required = 0x1;
9787 psr_config.psr_frame_capture_indication_req = 0;
9788 psr_config.psr_rfb_setup_time = 0x37;
9789 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9790 psr_config.allow_smu_optimizations = 0x0;
9791
9792 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9793
9794 }
d1ebfdd8 9795 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
8c322309
RL
9796
9797 return ret;
9798}
9799
9800/*
9801 * amdgpu_dm_psr_enable() - enable the PSR firmware
9802 * @stream: stream state
9803 *
9804 * Return: true on success
9805 */
9806bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9807{
9808 struct dc_link *link = stream->link;
5b5abe95
AK
9809 unsigned int vsync_rate_hz = 0;
9810 struct dc_static_screen_params params = {0};
9811 /* Calculate number of static frames before generating interrupt to
9812 * enter PSR.
9813 */
5b5abe95
AK
9814 /* Initialize with a fail-safe default of 2 static frames */
9815 unsigned int num_frames_static = 2;
8c322309
RL
9816
9817 DRM_DEBUG_DRIVER("Enabling psr...\n");
9818
5b5abe95
AK
9819 vsync_rate_hz = div64_u64(div64_u64((
9820 stream->timing.pix_clk_100hz * 100),
9821 stream->timing.v_total),
9822 stream->timing.h_total);
9823
9824 /*
9825 * Round up: compute the number of static frames needed so that
9826 * at least 30 ms of idle time has passed.
9827 */
7aa62404
RL
9828 if (vsync_rate_hz != 0) {
9829 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
5b5abe95 9830 num_frames_static = (30000 / frame_time_microsec) + 1;
7aa62404 9831 }
5b5abe95
AK
9832
9833 params.triggers.cursor_update = true;
9834 params.triggers.overlay_update = true;
9835 params.triggers.surface_update = true;
9836 params.num_frames = num_frames_static;
8c322309 9837
5b5abe95 9838 dc_stream_set_static_screen_params(link->ctx->dc,
8c322309 9839 &stream, 1,
5b5abe95 9840 &params);
8c322309 9841
1d496907 9842 return dc_link_set_psr_allow_active(link, true, false, false);
8c322309
RL
9843}
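
A worked example of the static-frame arithmetic above, as a small standalone program using standard CEA-861 1080p60 timings (148.5 MHz pixel clock, 2200x1125 total raster); the numbers are illustrative, not taken from the driver:

#include <stdio.h>

int main(void)
{
	unsigned long long pix_clk_100hz = 1485000; /* 148.5 MHz in 100 Hz units */
	unsigned int v_total = 1125, h_total = 2200;

	unsigned int vsync_rate_hz =
		(unsigned int)(pix_clk_100hz * 100 / v_total / h_total);  /* 60 */
	unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;      /* 16666 */
	unsigned int num_frames_static = 30000 / frame_time_microsec + 1; /* 2 */

	printf("%u Hz -> %u us/frame -> %u static frames\n",
	       vsync_rate_hz, frame_time_microsec, num_frames_static);
	return 0;
}

At 60 Hz the result is 2 frames, which happens to match the 2-frame fail-safe default used when the vsync rate cannot be computed.
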
9844
9845/*
9846 * amdgpu_dm_psr_disable() - disable the PSR firmware
9847 * @stream: stream state
9848 *
9849 * Return: true on success
9850 */
9851static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9852{
9854 DRM_DEBUG_DRIVER("Disabling psr...\n");
9855
1d496907 9856 return dc_link_set_psr_allow_active(stream->link, false, true, false);
8c322309 9857}
3d4e52d0 9858
6ee90e88 9859/*
9860 * amdgpu_dm_psr_disable_all() - disable the PSR firmware
9861 * if PSR is enabled on any stream
9862 *
9863 * Return: true on success
9864 */
9865static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9866{
9867 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9868 return dc_set_psr_allow_active(dm->dc, false);
9869}
9870
3d4e52d0
VL
9871void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9872{
1348969a 9873 struct amdgpu_device *adev = drm_to_adev(dev);
3d4e52d0
VL
9874 struct dc *dc = adev->dm.dc;
9875 int i;
9876
9877 mutex_lock(&adev->dm.dc_lock);
9878 if (dc->current_state) {
9879 for (i = 0; i < dc->current_state->stream_count; ++i)
9880 dc->current_state->streams[i]
9881 ->triggered_crtc_reset.enabled =
9882 adev->dm.force_timing_sync;
9883
9884 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9885 dc_trigger_sync(dc, dc->current_state);
9886 }
9887 mutex_unlock(&adev->dm.dc_lock);
9888}
9d83722d
RS
9889
9890void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9891 uint32_t value, const char *func_name)
9892{
9893#ifdef DM_CHECK_ADDR_0
9894 if (address == 0) {
9895 DC_ERR("invalid register write. address = 0");
9896 return;
9897 }
9898#endif
9899 cgs_write_register(ctx->cgs_device, address, value);
9900 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9901}
9902
9903uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9904 const char *func_name)
9905{
9906 uint32_t value;
9907#ifdef DM_CHECK_ADDR_0
9908 if (address == 0) {
9909 DC_ERR("invalid register read; address = 0\n");
9910 return 0;
9911 }
9912#endif
9913
9914 if (ctx->dmub_srv &&
9915 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9916 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9917 ASSERT(false);
9918 return 0;
9919 }
9920
9921 value = cgs_read_register(ctx->cgs_device, address);
9922
9923 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9924
9925 return value;
9926}
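
The guard in the read path above exists because a read issued while the DMUB register helper is gathering writes for a deferred burst would not observe those pending writes. A hypothetical, self-contained restatement of that invariant (struct and function names here are illustrative, not the driver's):

#include <stdbool.h>

/* Stand-in for the reg_helper_offload state checked above. */
struct reg_helper_state {
	bool gather_in_progress;
	bool should_burst_write;
};

/* A CPU read is only coherent if no gather is in progress, or the
 * gather also bursts its writes out before the read would land. */
static bool read_is_coherent(const struct reg_helper_state *h)
{
	return !(h && h->gather_in_progress && !h->should_burst_write);
}
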