1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "amdgpu_dm_trace.h"
39
40 #include "vid.h"
41 #include "amdgpu.h"
42 #include "amdgpu_display.h"
43 #include "amdgpu_ucode.h"
44 #include "atom.h"
45 #include "amdgpu_dm.h"
46 #ifdef CONFIG_DRM_AMD_DC_HDCP
47 #include "amdgpu_dm_hdcp.h"
48 #include <drm/drm_hdcp.h>
49 #endif
50 #include "amdgpu_pm.h"
51
52 #include "amd_shared.h"
53 #include "amdgpu_dm_irq.h"
54 #include "dm_helpers.h"
55 #include "amdgpu_dm_mst_types.h"
56 #if defined(CONFIG_DEBUG_FS)
57 #include "amdgpu_dm_debugfs.h"
58 #endif
59
60 #include "ivsrcid/ivsrcid_vislands30.h"
61
62 #include <linux/module.h>
63 #include <linux/moduleparam.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
69
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
79
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87
88 #include "soc15_common.h"
89 #endif
90
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
107
108 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
110
111 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
113
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
116
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
119
120 /**
121 * DOC: overview
122 *
123 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
125 * requests into DC requests, and DC responses into DRM responses.
126 *
127 * The root control structure is &struct amdgpu_display_manager.
128 */
129
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
134
135 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
136 {
137 switch (link->dpcd_caps.dongle_type) {
138 case DISPLAY_DONGLE_NONE:
139 return DRM_MODE_SUBCONNECTOR_Native;
140 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
141 return DRM_MODE_SUBCONNECTOR_VGA;
142 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
143 case DISPLAY_DONGLE_DP_DVI_DONGLE:
144 return DRM_MODE_SUBCONNECTOR_DVID;
145 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
146 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
147 return DRM_MODE_SUBCONNECTOR_HDMIA;
148 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
149 default:
150 return DRM_MODE_SUBCONNECTOR_Unknown;
151 }
152 }
153
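/*
 * Update the DP subconnector property on a DisplayPort connector, based on
 * the dongle type of the currently attached sink (Unknown when no sink).
 */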
154 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
155 {
156 struct dc_link *link = aconnector->dc_link;
157 struct drm_connector *connector = &aconnector->base;
158 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
159
160 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
161 return;
162
163 if (aconnector->dc_sink)
164 subconnector = get_subconnector_type(link);
165
166 drm_object_property_set_value(&connector->base,
167 connector->dev->mode_config.dp_subconnector_property,
168 subconnector);
169 }
170
171 /*
172 * Initializes drm_device display-related structures, based on the information
173 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
174 * drm_encoder, drm_mode_config
175 *
176 * Returns 0 on success
177 */
178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
179 /* removes and deallocates the drm structures, created by the above function */
180 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
181
182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
183 struct drm_plane *plane,
184 unsigned long possible_crtcs,
185 const struct dc_plane_cap *plane_cap);
186 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
187 struct drm_plane *plane,
188 uint32_t link_index);
189 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
190 struct amdgpu_dm_connector *amdgpu_dm_connector,
191 uint32_t link_index,
192 struct amdgpu_encoder *amdgpu_encoder);
193 static int amdgpu_dm_encoder_init(struct drm_device *dev,
194 struct amdgpu_encoder *aencoder,
195 uint32_t link_index);
196
197 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
198
199 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
200
201 static int amdgpu_dm_atomic_check(struct drm_device *dev,
202 struct drm_atomic_state *state);
203
204 static void handle_cursor_update(struct drm_plane *plane,
205 struct drm_plane_state *old_plane_state);
206
207 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
208 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
209 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
211 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
212
213 static const struct drm_format_info *
214 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
215
216 static bool
217 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
218 struct drm_crtc_state *new_crtc_state);
219 /**
220 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
221 * @adev: [in] desired amdgpu device
222 * @crtc: [in] index of the CRTC to get the counter from
223 *
224 * Return: Counter for vertical blanks, or 0 if @crtc is out of range or
225 * its stream is NULL.
226 */
232 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
233 {
234 if (crtc >= adev->mode_info.num_crtc)
235 return 0;
236 else {
237 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
238
239 if (acrtc->dm_irq_params.stream == NULL) {
240 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
241 crtc);
242 return 0;
243 }
244
245 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
246 }
247 }
248
249 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
250 u32 *vbl, u32 *position)
251 {
252 uint32_t v_blank_start, v_blank_end, h_position, v_position;
253
254 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
255 return -EINVAL;
256 else {
257 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
258
259 if (acrtc->dm_irq_params.stream == NULL) {
260 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
261 crtc);
262 return 0;
263 }
264
265 /*
266 * TODO rework base driver to use values directly.
267 * for now parse it back into reg-format
268 */
269 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
270 &v_blank_start,
271 &v_blank_end,
272 &h_position,
273 &v_position);
274
275 *position = v_position | (h_position << 16);
276 *vbl = v_blank_start | (v_blank_end << 16);
277 }
278
279 return 0;
280 }
281
282 static bool dm_is_idle(void *handle)
283 {
284 /* XXX todo */
285 return true;
286 }
287
288 static int dm_wait_for_idle(void *handle)
289 {
290 /* XXX todo */
291 return 0;
292 }
293
294 static bool dm_check_soft_reset(void *handle)
295 {
296 return false;
297 }
298
299 static int dm_soft_reset(void *handle)
300 {
301 /* XXX todo */
302 return 0;
303 }
304
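/*
 * Find the amdgpu_crtc whose OTG instance matches otg_inst. An otg_inst of
 * -1 is unexpected and falls back to the first CRTC with a warning.
 */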
305 static struct amdgpu_crtc *
306 get_crtc_by_otg_inst(struct amdgpu_device *adev,
307 int otg_inst)
308 {
309 struct drm_device *dev = adev_to_drm(adev);
310 struct drm_crtc *crtc;
311 struct amdgpu_crtc *amdgpu_crtc;
312
313 if (otg_inst == -1) {
314 WARN_ON(1);
315 return adev->mode_info.crtcs[0];
316 }
317
318 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319 amdgpu_crtc = to_amdgpu_crtc(crtc);
320
321 if (amdgpu_crtc->otg_inst == otg_inst)
322 return amdgpu_crtc;
323 }
324
325 return NULL;
326 }
327
328 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
329 {
330 return acrtc->dm_irq_params.freesync_config.state ==
331 VRR_STATE_ACTIVE_VARIABLE ||
332 acrtc->dm_irq_params.freesync_config.state ==
333 VRR_STATE_ACTIVE_FIXED;
334 }
335
336 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
337 {
338 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
340 }
341
342 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343 struct dm_crtc_state *new_state)
344 {
345 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
346 return true;
347 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
348 return true;
349 else
350 return false;
351 }
352
353 /**
354 * dm_pflip_high_irq() - Handle pageflip interrupt
355 * @interrupt_params: interrupt parameters
356 *
357 * Handles the pageflip interrupt by notifying all interested parties
358 * that the pageflip has been completed.
359 */
360 static void dm_pflip_high_irq(void *interrupt_params)
361 {
362 struct amdgpu_crtc *amdgpu_crtc;
363 struct common_irq_params *irq_params = interrupt_params;
364 struct amdgpu_device *adev = irq_params->adev;
365 unsigned long flags;
366 struct drm_pending_vblank_event *e;
367 uint32_t vpos, hpos, v_blank_start, v_blank_end;
368 bool vrr_active;
369
370 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
371
372 /* IRQ could occur when in initial stage */
373 /* TODO work and BO cleanup */
374 if (amdgpu_crtc == NULL) {
375 DC_LOG_PFLIP("CRTC is null, returning.\n");
376 return;
377 }
378
379 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
380
381 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
382 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
383 amdgpu_crtc->pflip_status,
384 AMDGPU_FLIP_SUBMITTED,
385 amdgpu_crtc->crtc_id,
386 amdgpu_crtc);
387 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
388 return;
389 }
390
391 /* page flip completed. */
392 e = amdgpu_crtc->event;
393 amdgpu_crtc->event = NULL;
394
395 WARN_ON(!e);
397
398 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
399
400 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
401 if (!vrr_active ||
402 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
403 &v_blank_end, &hpos, &vpos) ||
404 (vpos < v_blank_start)) {
405 /* Update to correct count and vblank timestamp if racing with
406 * vblank irq. This also updates to the correct vblank timestamp
407 * even in VRR mode, as scanout is past the front-porch atm.
408 */
409 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
410
411 /* Wake up userspace by sending the pageflip event with proper
412 * count and timestamp of vblank of flip completion.
413 */
414 if (e) {
415 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
416
417 /* Event sent, so done with vblank for this flip */
418 drm_crtc_vblank_put(&amdgpu_crtc->base);
419 }
420 } else if (e) {
421 /* VRR active and inside front-porch: vblank count and
422 * timestamp for pageflip event will only be up to date after
423 * drm_crtc_handle_vblank() has been executed from late vblank
424 * irq handler after start of back-porch (vline 0). We queue the
425 * pageflip event for send-out by drm_crtc_handle_vblank() with
426 * updated timestamp and count, once it runs after us.
427 *
428 * We need to open-code this instead of using the helper
429 * drm_crtc_arm_vblank_event(), as that helper would
430 * call drm_crtc_accurate_vblank_count(), which we must
431 * not call in VRR mode while we are in front-porch!
432 */
433
434 /* sequence will be replaced by real count during send-out. */
435 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
436 e->pipe = amdgpu_crtc->crtc_id;
437
438 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
439 e = NULL;
440 }
441
442 /* Keep track of vblank of this flip for flip throttling. We use the
443 * cooked hw counter, as it gets incremented at the start of this vblank
444 * of pageflip completion, so last_flip_vblank is the forbidden count
445 * for queueing new pageflips if vsync + VRR is enabled.
446 */
447 amdgpu_crtc->dm_irq_params.last_flip_vblank =
448 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
449
450 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
451 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
452
453 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
454 amdgpu_crtc->crtc_id, amdgpu_crtc,
455 vrr_active, (int) !e);
456 }
457
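/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: interrupt parameters
 *
 * Traces the measured frame duration for refresh-rate tracking and, in VRR
 * mode, performs the core vblank handling after the end of front-porch,
 * including BTR processing for pre-DCE12 ASICs.
 */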
458 static void dm_vupdate_high_irq(void *interrupt_params)
459 {
460 struct common_irq_params *irq_params = interrupt_params;
461 struct amdgpu_device *adev = irq_params->adev;
462 struct amdgpu_crtc *acrtc;
463 struct drm_device *drm_dev;
464 struct drm_vblank_crtc *vblank;
465 ktime_t frame_duration_ns, previous_timestamp;
466 unsigned long flags;
467 int vrr_active;
468
469 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
470
471 if (acrtc) {
472 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
473 drm_dev = acrtc->base.dev;
474 vblank = &drm_dev->vblank[acrtc->base.index];
475 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
476 frame_duration_ns = vblank->time - previous_timestamp;
477
478 if (frame_duration_ns > 0) {
479 trace_amdgpu_refresh_rate_track(acrtc->base.index,
480 frame_duration_ns,
481 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
482 atomic64_set(&irq_params->previous_timestamp, vblank->time);
483 }
484
485 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
486 acrtc->crtc_id,
487 vrr_active);
488
489 /* Core vblank handling is done here after the end of front-porch in
490 * vrr mode, as vblank timestamping gives valid results only once we
491 * are past the front-porch. This will also deliver any page-flip
492 * completion events that have been queued to us if a pageflip
493 * happened inside the front-porch.
494 */
495 if (vrr_active) {
496 drm_crtc_handle_vblank(&acrtc->base);
497
498 /* BTR processing for pre-DCE12 ASICs */
499 if (acrtc->dm_irq_params.stream &&
500 adev->family < AMDGPU_FAMILY_AI) {
501 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
502 mod_freesync_handle_v_update(
503 adev->dm.freesync_module,
504 acrtc->dm_irq_params.stream,
505 &acrtc->dm_irq_params.vrr_params);
506
507 dc_stream_adjust_vmin_vmax(
508 adev->dm.dc,
509 acrtc->dm_irq_params.stream,
510 &acrtc->dm_irq_params.vrr_params.adjust);
511 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
512 }
513 }
514 }
515 }
516
517 /**
518 * dm_crtc_high_irq() - Handles CRTC interrupt
519 * @interrupt_params: used for determining the CRTC instance
520 *
521 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
522 * event handler.
523 */
524 static void dm_crtc_high_irq(void *interrupt_params)
525 {
526 struct common_irq_params *irq_params = interrupt_params;
527 struct amdgpu_device *adev = irq_params->adev;
528 struct amdgpu_crtc *acrtc;
529 unsigned long flags;
530 int vrr_active;
531
532 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
533 if (!acrtc)
534 return;
535
536 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
537
538 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
539 vrr_active, acrtc->dm_irq_params.active_planes);
540
541 /*
542 * Core vblank handling at the start of front-porch is only possible
543 * in non-vrr mode, as only then does vblank timestamping give valid
544 * results while inside the front-porch. Otherwise defer it to
545 * dm_vupdate_high_irq() after the end of front-porch.
546 */
547 if (!vrr_active)
548 drm_crtc_handle_vblank(&acrtc->base);
549
550 /*
551 * The following must happen at the start of vblank, for CRC
552 * computation and below-the-range (BTR) support in vrr mode.
553 */
554 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
555
556 /* BTR updates need to happen before VUPDATE on Vega and above. */
557 if (adev->family < AMDGPU_FAMILY_AI)
558 return;
559
560 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
561
562 if (acrtc->dm_irq_params.stream &&
563 acrtc->dm_irq_params.vrr_params.supported &&
564 acrtc->dm_irq_params.freesync_config.state ==
565 VRR_STATE_ACTIVE_VARIABLE) {
566 mod_freesync_handle_v_update(adev->dm.freesync_module,
567 acrtc->dm_irq_params.stream,
568 &acrtc->dm_irq_params.vrr_params);
569
570 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
571 &acrtc->dm_irq_params.vrr_params.adjust);
572 }
573
574 /*
575 * If there aren't any active_planes then DCH HUBP may be clock-gated.
576 * In that case, pageflip completion interrupts won't fire and pageflip
577 * completion events won't get delivered. Prevent this by sending
578 * pending pageflip events from here if a flip is still pending.
579 *
580 * If any planes are enabled, use dm_pflip_high_irq() instead, to
581 * avoid race conditions between flip programming and completion,
582 * which could cause too early flip completion events.
583 */
584 if (adev->family >= AMDGPU_FAMILY_RV &&
585 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
586 acrtc->dm_irq_params.active_planes == 0) {
587 if (acrtc->event) {
588 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
589 acrtc->event = NULL;
590 drm_crtc_vblank_put(&acrtc->base);
591 }
592 acrtc->pflip_status = AMDGPU_FLIP_NONE;
593 }
594
595 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
596 }
597
598 #if defined(CONFIG_DRM_AMD_DC_DCN)
599 /**
600 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
601 * DCN generation ASICs
602 * @interrupt_params: interrupt parameters
603 *
604 * Used to set crc window/read out crc value at vertical line 0 position
605 */
606 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
607 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
608 {
609 struct common_irq_params *irq_params = interrupt_params;
610 struct amdgpu_device *adev = irq_params->adev;
611 struct amdgpu_crtc *acrtc;
612
613 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
614
615 if (!acrtc)
616 return;
617
618 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
619 }
620 #endif
621 #endif
622
623 static int dm_set_clockgating_state(void *handle,
624 enum amd_clockgating_state state)
625 {
626 return 0;
627 }
628
629 static int dm_set_powergating_state(void *handle,
630 enum amd_powergating_state state)
631 {
632 return 0;
633 }
634
635 /* Prototypes of private functions */
636 static int dm_early_init(void *handle);
637
638 /* Allocate memory for FBC compressed data */
639 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
640 {
641 struct drm_device *dev = connector->dev;
642 struct amdgpu_device *adev = drm_to_adev(dev);
643 struct dm_compressor_info *compressor = &adev->dm.compressor;
644 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
645 struct drm_display_mode *mode;
646 unsigned long max_size = 0;
647
648 if (adev->dm.dc->fbc_compressor == NULL)
649 return;
650
651 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
652 return;
653
654 if (compressor->bo_ptr)
655 return;
656
657
658 list_for_each_entry(mode, &connector->modes, head) {
659 if (max_size < mode->htotal * mode->vtotal)
660 max_size = mode->htotal * mode->vtotal;
661 }
662
663 if (max_size) {
664 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
665 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
666 &compressor->gpu_addr, &compressor->cpu_addr);
667
668 if (r)
669 DRM_ERROR("DM: Failed to initialize FBC\n");
670 else {
671 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
672 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
673 }
674
675 }
676
677 }
678
679 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
680 int pipe, bool *enabled,
681 unsigned char *buf, int max_bytes)
682 {
683 struct drm_device *dev = dev_get_drvdata(kdev);
684 struct amdgpu_device *adev = drm_to_adev(dev);
685 struct drm_connector *connector;
686 struct drm_connector_list_iter conn_iter;
687 struct amdgpu_dm_connector *aconnector;
688 int ret = 0;
689
690 *enabled = false;
691
692 mutex_lock(&adev->dm.audio_lock);
693
694 drm_connector_list_iter_begin(dev, &conn_iter);
695 drm_for_each_connector_iter(connector, &conn_iter) {
696 aconnector = to_amdgpu_dm_connector(connector);
697 if (aconnector->audio_inst != port)
698 continue;
699
700 *enabled = true;
701 ret = drm_eld_size(connector->eld);
702 memcpy(buf, connector->eld, min(max_bytes, ret));
703
704 break;
705 }
706 drm_connector_list_iter_end(&conn_iter);
707
708 mutex_unlock(&adev->dm.audio_lock);
709
710 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
711
712 return ret;
713 }
714
715 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
716 .get_eld = amdgpu_dm_audio_component_get_eld,
717 };
718
719 static int amdgpu_dm_audio_component_bind(struct device *kdev,
720 struct device *hda_kdev, void *data)
721 {
722 struct drm_device *dev = dev_get_drvdata(kdev);
723 struct amdgpu_device *adev = drm_to_adev(dev);
724 struct drm_audio_component *acomp = data;
725
726 acomp->ops = &amdgpu_dm_audio_component_ops;
727 acomp->dev = kdev;
728 adev->dm.audio_component = acomp;
729
730 return 0;
731 }
732
733 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
734 struct device *hda_kdev, void *data)
735 {
736 struct drm_device *dev = dev_get_drvdata(kdev);
737 struct amdgpu_device *adev = drm_to_adev(dev);
738 struct drm_audio_component *acomp = data;
739
740 acomp->ops = NULL;
741 acomp->dev = NULL;
742 adev->dm.audio_component = NULL;
743 }
744
745 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
746 .bind = amdgpu_dm_audio_component_bind,
747 .unbind = amdgpu_dm_audio_component_unbind,
748 };
749
750 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
751 {
752 int i, ret;
753
754 if (!amdgpu_audio)
755 return 0;
756
757 adev->mode_info.audio.enabled = true;
758
759 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
760
761 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
762 adev->mode_info.audio.pin[i].channels = -1;
763 adev->mode_info.audio.pin[i].rate = -1;
764 adev->mode_info.audio.pin[i].bits_per_sample = -1;
765 adev->mode_info.audio.pin[i].status_bits = 0;
766 adev->mode_info.audio.pin[i].category_code = 0;
767 adev->mode_info.audio.pin[i].connected = false;
768 adev->mode_info.audio.pin[i].id =
769 adev->dm.dc->res_pool->audios[i]->inst;
770 adev->mode_info.audio.pin[i].offset = 0;
771 }
772
773 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
774 if (ret < 0)
775 return ret;
776
777 adev->dm.audio_registered = true;
778
779 return 0;
780 }
781
782 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
783 {
784 if (!amdgpu_audio)
785 return;
786
787 if (!adev->mode_info.audio.enabled)
788 return;
789
790 if (adev->dm.audio_registered) {
791 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
792 adev->dm.audio_registered = false;
793 }
794
795 /* TODO: Disable audio? */
796
797 adev->mode_info.audio.enabled = false;
798 }
799
800 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
801 {
802 struct drm_audio_component *acomp = adev->dm.audio_component;
803
804 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
805 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
806
807 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
808 pin, -1);
809 }
810 }
811
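/*
 * dm_dmub_hw_init() - Copy the DMUB firmware and VBIOS into framebuffer
 * memory, program the DMUB hardware, wait for the firmware auto-load and
 * create the DC-side DMUB server. Returns 0 if DMUB is not supported on
 * the ASIC.
 */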
812 static int dm_dmub_hw_init(struct amdgpu_device *adev)
813 {
814 const struct dmcub_firmware_header_v1_0 *hdr;
815 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
816 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
817 const struct firmware *dmub_fw = adev->dm.dmub_fw;
818 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
819 struct abm *abm = adev->dm.dc->res_pool->abm;
820 struct dmub_srv_hw_params hw_params;
821 enum dmub_status status;
822 const unsigned char *fw_inst_const, *fw_bss_data;
823 uint32_t i, fw_inst_const_size, fw_bss_data_size;
824 bool has_hw_support;
825
826 if (!dmub_srv)
827 /* DMUB isn't supported on the ASIC. */
828 return 0;
829
830 if (!fb_info) {
831 DRM_ERROR("No framebuffer info for DMUB service.\n");
832 return -EINVAL;
833 }
834
835 if (!dmub_fw) {
836 /* Firmware required for DMUB support. */
837 DRM_ERROR("No firmware provided for DMUB.\n");
838 return -EINVAL;
839 }
840
841 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
842 if (status != DMUB_STATUS_OK) {
843 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
844 return -EINVAL;
845 }
846
847 if (!has_hw_support) {
848 DRM_INFO("DMUB unsupported on ASIC\n");
849 return 0;
850 }
851
852 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
853
854 fw_inst_const = dmub_fw->data +
855 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
856 PSP_HEADER_BYTES;
857
858 fw_bss_data = dmub_fw->data +
859 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
860 le32_to_cpu(hdr->inst_const_bytes);
861
862 /* Copy firmware and bios info into FB memory. */
863 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
864 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
865
866 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
867
868 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
869 * amdgpu_ucode_init_single_fw will load dmub firmware
870 * fw_inst_const part to cw0; otherwise, the firmware back door load
871 * will be done by dm_dmub_hw_init
872 */
873 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
874 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
875 fw_inst_const_size);
876 }
877
878 if (fw_bss_data_size)
879 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
880 fw_bss_data, fw_bss_data_size);
881
882 /* Copy firmware bios info into FB memory. */
883 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
884 adev->bios_size);
885
886 /* Reset regions that need to be reset. */
887 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
888 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
889
890 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
891 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
892
893 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
894 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
895
896 /* Initialize hardware. */
897 memset(&hw_params, 0, sizeof(hw_params));
898 hw_params.fb_base = adev->gmc.fb_start;
899 hw_params.fb_offset = adev->gmc.aper_base;
900
901 /* backdoor load firmware and trigger dmub running */
902 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
903 hw_params.load_inst_const = true;
904
905 if (dmcu)
906 hw_params.psp_version = dmcu->psp_version;
907
908 for (i = 0; i < fb_info->num_fb; ++i)
909 hw_params.fb[i] = &fb_info->fb[i];
910
911 status = dmub_srv_hw_init(dmub_srv, &hw_params);
912 if (status != DMUB_STATUS_OK) {
913 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
914 return -EINVAL;
915 }
916
917 /* Wait for firmware load to finish. */
918 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
919 if (status != DMUB_STATUS_OK)
920 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
921
922 /* Init DMCU and ABM if available. */
923 if (dmcu && abm) {
924 dmcu->funcs->dmcu_init(dmcu);
925 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
926 }
927
928 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
929 if (!adev->dm.dc->ctx->dmub_srv) {
930 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
931 return -ENOMEM;
932 }
933
934 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
935 adev->dm.dmcub_fw_version);
936
937 return 0;
938 }
939
940 #if defined(CONFIG_DRM_AMD_DC_DCN)
941 #define DMUB_TRACE_MAX_READ 64
942 static void dm_dmub_trace_high_irq(void *interrupt_params)
943 {
944 struct common_irq_params *irq_params = interrupt_params;
945 struct amdgpu_device *adev = irq_params->adev;
946 struct amdgpu_display_manager *dm = &adev->dm;
947 struct dmcub_trace_buf_entry entry = { 0 };
948 uint32_t count = 0;
949
950 do {
951 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
952 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
953 entry.param0, entry.param1);
954
955 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
956 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
957 } else
958 break;
959
960 count++;
961
962 } while (count <= DMUB_TRACE_MAX_READ);
963
964 ASSERT(count <= DMUB_TRACE_MAX_READ);
965 }
966
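/*
 * Fill a dc_phy_addr_space_config from the GMC state: the system aperture
 * (FB and AGP ranges) and the GART page table start/end/base addresses.
 */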
967 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
968 {
969 uint64_t pt_base;
970 uint32_t logical_addr_low;
971 uint32_t logical_addr_high;
972 uint32_t agp_base, agp_bot, agp_top;
973 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
974
975 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
976 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
977
978 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
979 /*
980 * Raven2 has a HW issue where it is unable to use vram that lies
981 * beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. The workaround is to
982 * increase the system aperture high address (add 1) to get rid of
983 * the VM fault and hardware hang.
984 */
985 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
986 else
987 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
988
989 agp_base = 0;
990 agp_bot = adev->gmc.agp_start >> 24;
991 agp_top = adev->gmc.agp_end >> 24;
992
993
994 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
995 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
996 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
997 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
998 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
999 page_table_base.low_part = lower_32_bits(pt_base);
1000
1001 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1002 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1003
1004 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1005 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1006 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1007
1008 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1009 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1010 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1011
1012 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1013 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1014 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1015
1016 pa_config->is_hvm_enabled = 0;
1017
1018 }
1019 #endif
1020 #if defined(CONFIG_DRM_AMD_DC_DCN)
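/*
 * Work handler that tracks the number of CRTCs with vblank interrupts
 * enabled and tells DC whether idle (MALL) optimizations are allowed,
 * i.e. only when no vblank interrupts are active.
 */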
1021 static void event_mall_stutter(struct work_struct *work)
1022 {
1023
1024 struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1025 struct amdgpu_display_manager *dm = vblank_work->dm;
1026
1027 mutex_lock(&dm->dc_lock);
1028
1029 if (vblank_work->enable)
1030 dm->active_vblank_irq_count++;
1031 else if (dm->active_vblank_irq_count)
1032 dm->active_vblank_irq_count--;
1033
1034 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1035
1036 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1037
1038 mutex_unlock(&dm->dc_lock);
1039 }
1040
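/*
 * Allocate one MALL/stutter work item per link and initialize each one to
 * run event_mall_stutter().
 */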
1041 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1042 {
1043
1044 int max_caps = dc->caps.max_links;
1045 struct vblank_workqueue *vblank_work;
1046 int i = 0;
1047
1048 vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1049 if (ZERO_OR_NULL_PTR(vblank_work)) {
1050 kfree(vblank_work);
1051 return NULL;
1052 }
1053
1054 for (i = 0; i < max_caps; i++)
1055 INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1056
1057 return vblank_work;
1058 }
1059 #endif
1060 static int amdgpu_dm_init(struct amdgpu_device *adev)
1061 {
1062 struct dc_init_data init_data;
1063 #ifdef CONFIG_DRM_AMD_DC_HDCP
1064 struct dc_callback_init init_params;
1065 #endif
1066 int r;
1067
1068 adev->dm.ddev = adev_to_drm(adev);
1069 adev->dm.adev = adev;
1070
1071 /* Zero all the fields */
1072 memset(&init_data, 0, sizeof(init_data));
1073 #ifdef CONFIG_DRM_AMD_DC_HDCP
1074 memset(&init_params, 0, sizeof(init_params));
1075 #endif
1076
1077 mutex_init(&adev->dm.dc_lock);
1078 mutex_init(&adev->dm.audio_lock);
1079 #if defined(CONFIG_DRM_AMD_DC_DCN)
1080 spin_lock_init(&adev->dm.vblank_lock);
1081 #endif
1082
1083 if (amdgpu_dm_irq_init(adev)) {
1084 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1085 goto error;
1086 }
1087
1088 init_data.asic_id.chip_family = adev->family;
1089
1090 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1091 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1092
1093 init_data.asic_id.vram_width = adev->gmc.vram_width;
1094 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1095 init_data.asic_id.atombios_base_address =
1096 adev->mode_info.atom_context->bios;
1097
1098 init_data.driver = adev;
1099
1100 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1101
1102 if (!adev->dm.cgs_device) {
1103 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1104 goto error;
1105 }
1106
1107 init_data.cgs_device = adev->dm.cgs_device;
1108
1109 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1110
1111 switch (adev->asic_type) {
1112 case CHIP_CARRIZO:
1113 case CHIP_STONEY:
1114 case CHIP_RAVEN:
1115 case CHIP_RENOIR:
1116 init_data.flags.gpu_vm_support = true;
1117 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1118 init_data.flags.disable_dmcu = true;
1119 break;
1120 #if defined(CONFIG_DRM_AMD_DC_DCN)
1121 case CHIP_VANGOGH:
1122 init_data.flags.gpu_vm_support = true;
1123 break;
1124 #endif
1125 default:
1126 break;
1127 }
1128
1129 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1130 init_data.flags.fbc_support = true;
1131
1132 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1133 init_data.flags.multi_mon_pp_mclk_switch = true;
1134
1135 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1136 init_data.flags.disable_fractional_pwm = true;
1137
1138 init_data.flags.power_down_display_on_boot = true;
1139
1140 INIT_LIST_HEAD(&adev->dm.da_list);
1141 /* Display Core create. */
1142 adev->dm.dc = dc_create(&init_data);
1143
1144 if (adev->dm.dc) {
1145 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1146 } else {
1147 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1148 goto error;
1149 }
1150
1151 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1152 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1153 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1154 }
1155
1156 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1157 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1158
1159 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1160 adev->dm.dc->debug.disable_stutter = true;
1161
1162 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1163 adev->dm.dc->debug.disable_dsc = true;
1164
1165 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1166 adev->dm.dc->debug.disable_clock_gate = true;
1167
1168 r = dm_dmub_hw_init(adev);
1169 if (r) {
1170 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1171 goto error;
1172 }
1173
1174 dc_hardware_init(adev->dm.dc);
1175
1176 #if defined(CONFIG_DRM_AMD_DC_DCN)
1177 if (adev->apu_flags) {
1178 struct dc_phy_addr_space_config pa_config;
1179
1180 mmhub_read_system_context(adev, &pa_config);
1181
1182 // Call the DC init_memory func
1183 dc_setup_system_context(adev->dm.dc, &pa_config);
1184 }
1185 #endif
1186
1187 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1188 if (!adev->dm.freesync_module) {
1189 DRM_ERROR(
1190 "amdgpu: failed to initialize freesync_module.\n");
1191 } else
1192 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1193 adev->dm.freesync_module);
1194
1195 amdgpu_dm_init_color_mod();
1196
1197 #if defined(CONFIG_DRM_AMD_DC_DCN)
1198 if (adev->dm.dc->caps.max_links > 0) {
1199 adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1200
1201 if (!adev->dm.vblank_workqueue)
1202 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1203 else
1204 DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1205 }
1206 #endif
1207
1208 #ifdef CONFIG_DRM_AMD_DC_HDCP
1209 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1210 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1211
1212 if (!adev->dm.hdcp_workqueue)
1213 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1214 else
1215 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1216
1217 dc_init_callbacks(adev->dm.dc, &init_params);
1218 }
1219 #endif
1220 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1221 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1222 #endif
1223 if (amdgpu_dm_initialize_drm_device(adev)) {
1224 DRM_ERROR(
1225 "amdgpu: failed to initialize sw for display support.\n");
1226 goto error;
1227 }
1228
1229 /* create fake encoders for MST */
1230 dm_dp_create_fake_mst_encoders(adev);
1231
1232 /* TODO: Add_display_info? */
1233
1234 /* TODO use dynamic cursor width */
1235 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1236 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1237
1238 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1239 DRM_ERROR(
1240 "amdgpu: failed to initialize sw for display support.\n");
1241 goto error;
1242 }
1243
1244
1245 DRM_DEBUG_DRIVER("KMS initialized.\n");
1246
1247 return 0;
1248 error:
1249 amdgpu_dm_fini(adev);
1250
1251 return -EINVAL;
1252 }
1253
1254 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1255 {
1256 int i;
1257
1258 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1259 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1260 }
1261
1262 amdgpu_dm_audio_fini(adev);
1263
1264 amdgpu_dm_destroy_drm_device(&adev->dm);
1265
1266 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1267 if (adev->dm.crc_rd_wrk) {
1268 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1269 kfree(adev->dm.crc_rd_wrk);
1270 adev->dm.crc_rd_wrk = NULL;
1271 }
1272 #endif
1273 #ifdef CONFIG_DRM_AMD_DC_HDCP
1274 if (adev->dm.hdcp_workqueue) {
1275 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1276 adev->dm.hdcp_workqueue = NULL;
1277 }
1278
1279 if (adev->dm.dc)
1280 dc_deinit_callbacks(adev->dm.dc);
1281 #endif
1282
1283 #if defined(CONFIG_DRM_AMD_DC_DCN)
1284 if (adev->dm.vblank_workqueue) {
1285 adev->dm.vblank_workqueue->dm = NULL;
1286 kfree(adev->dm.vblank_workqueue);
1287 adev->dm.vblank_workqueue = NULL;
1288 }
1289 #endif
1290
1291 if (adev->dm.dc->ctx->dmub_srv) {
1292 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1293 adev->dm.dc->ctx->dmub_srv = NULL;
1294 }
1295
1296 if (adev->dm.dmub_bo)
1297 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1298 &adev->dm.dmub_bo_gpu_addr,
1299 &adev->dm.dmub_bo_cpu_addr);
1300
1301 /* DC Destroy TODO: Replace destroy DAL */
1302 if (adev->dm.dc)
1303 dc_destroy(&adev->dm.dc);
1304 /*
1305 * TODO: pageflip, vblank interrupt
1306 *
1307 * amdgpu_dm_irq_fini(adev);
1308 */
1309
1310 if (adev->dm.cgs_device) {
1311 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1312 adev->dm.cgs_device = NULL;
1313 }
1314 if (adev->dm.freesync_module) {
1315 mod_freesync_destroy(adev->dm.freesync_module);
1316 adev->dm.freesync_module = NULL;
1317 }
1318
1319 mutex_destroy(&adev->dm.audio_lock);
1320 mutex_destroy(&adev->dm.dc_lock);
1323 }
1324
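/*
 * Request and validate the DMCU firmware for ASICs that use it, and
 * register its ERAM/INTV sections for loading through PSP. ASICs without
 * separate DMCU firmware simply return 0.
 */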
1325 static int load_dmcu_fw(struct amdgpu_device *adev)
1326 {
1327 const char *fw_name_dmcu = NULL;
1328 int r;
1329 const struct dmcu_firmware_header_v1_0 *hdr;
1330
1331 switch (adev->asic_type) {
1332 #if defined(CONFIG_DRM_AMD_DC_SI)
1333 case CHIP_TAHITI:
1334 case CHIP_PITCAIRN:
1335 case CHIP_VERDE:
1336 case CHIP_OLAND:
1337 #endif
1338 case CHIP_BONAIRE:
1339 case CHIP_HAWAII:
1340 case CHIP_KAVERI:
1341 case CHIP_KABINI:
1342 case CHIP_MULLINS:
1343 case CHIP_TONGA:
1344 case CHIP_FIJI:
1345 case CHIP_CARRIZO:
1346 case CHIP_STONEY:
1347 case CHIP_POLARIS11:
1348 case CHIP_POLARIS10:
1349 case CHIP_POLARIS12:
1350 case CHIP_VEGAM:
1351 case CHIP_VEGA10:
1352 case CHIP_VEGA12:
1353 case CHIP_VEGA20:
1354 case CHIP_NAVI10:
1355 case CHIP_NAVI14:
1356 case CHIP_RENOIR:
1357 case CHIP_SIENNA_CICHLID:
1358 case CHIP_NAVY_FLOUNDER:
1359 case CHIP_DIMGREY_CAVEFISH:
1360 case CHIP_VANGOGH:
1361 return 0;
1362 case CHIP_NAVI12:
1363 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1364 break;
1365 case CHIP_RAVEN:
1366 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1367 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1368 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1369 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1370 else
1371 return 0;
1372 break;
1373 default:
1374 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1375 return -EINVAL;
1376 }
1377
1378 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1379 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1380 return 0;
1381 }
1382
1383 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1384 if (r == -ENOENT) {
1385 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1386 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1387 adev->dm.fw_dmcu = NULL;
1388 return 0;
1389 }
1390 if (r) {
1391 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1392 fw_name_dmcu);
1393 return r;
1394 }
1395
1396 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1397 if (r) {
1398 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1399 fw_name_dmcu);
1400 release_firmware(adev->dm.fw_dmcu);
1401 adev->dm.fw_dmcu = NULL;
1402 return r;
1403 }
1404
1405 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1406 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1407 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1408 adev->firmware.fw_size +=
1409 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1410
1411 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1412 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1413 adev->firmware.fw_size +=
1414 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1415
1416 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1417
1418 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1419
1420 return 0;
1421 }
1422
1423 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1424 {
1425 struct amdgpu_device *adev = ctx;
1426
1427 return dm_read_reg(adev->dm.dc->ctx, address);
1428 }
1429
1430 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1431 uint32_t value)
1432 {
1433 struct amdgpu_device *adev = ctx;
1434
1435 return dm_write_reg(adev->dm.dc->ctx, address, value);
1436 }
1437
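/*
 * dm_dmub_sw_init() - Request the DMUB firmware for the current ASIC,
 * create the DMUB service, calculate its region layout, and allocate the
 * VRAM backing plus the fb_info later consumed by dm_dmub_hw_init().
 */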
1438 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1439 {
1440 struct dmub_srv_create_params create_params;
1441 struct dmub_srv_region_params region_params;
1442 struct dmub_srv_region_info region_info;
1443 struct dmub_srv_fb_params fb_params;
1444 struct dmub_srv_fb_info *fb_info;
1445 struct dmub_srv *dmub_srv;
1446 const struct dmcub_firmware_header_v1_0 *hdr;
1447 const char *fw_name_dmub;
1448 enum dmub_asic dmub_asic;
1449 enum dmub_status status;
1450 int r;
1451
1452 switch (adev->asic_type) {
1453 case CHIP_RENOIR:
1454 dmub_asic = DMUB_ASIC_DCN21;
1455 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1456 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1457 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1458 break;
1459 case CHIP_SIENNA_CICHLID:
1460 dmub_asic = DMUB_ASIC_DCN30;
1461 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1462 break;
1463 case CHIP_NAVY_FLOUNDER:
1464 dmub_asic = DMUB_ASIC_DCN30;
1465 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1466 break;
1467 case CHIP_VANGOGH:
1468 dmub_asic = DMUB_ASIC_DCN301;
1469 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1470 break;
1471 case CHIP_DIMGREY_CAVEFISH:
1472 dmub_asic = DMUB_ASIC_DCN302;
1473 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1474 break;
1475
1476 default:
1477 /* ASIC doesn't support DMUB. */
1478 return 0;
1479 }
1480
1481 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1482 if (r) {
1483 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1484 return 0;
1485 }
1486
1487 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1488 if (r) {
1489 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1490 return 0;
1491 }
1492
1493 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1494
1495 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1496 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1497 AMDGPU_UCODE_ID_DMCUB;
1498 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1499 adev->dm.dmub_fw;
1500 adev->firmware.fw_size +=
1501 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1502
1503 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1504 adev->dm.dmcub_fw_version);
1505 }
1506
1507 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1508
1509 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1510 dmub_srv = adev->dm.dmub_srv;
1511
1512 if (!dmub_srv) {
1513 DRM_ERROR("Failed to allocate DMUB service!\n");
1514 return -ENOMEM;
1515 }
1516
1517 memset(&create_params, 0, sizeof(create_params));
1518 create_params.user_ctx = adev;
1519 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1520 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1521 create_params.asic = dmub_asic;
1522
1523 /* Create the DMUB service. */
1524 status = dmub_srv_create(dmub_srv, &create_params);
1525 if (status != DMUB_STATUS_OK) {
1526 DRM_ERROR("Error creating DMUB service: %d\n", status);
1527 return -EINVAL;
1528 }
1529
1530 /* Calculate the size of all the regions for the DMUB service. */
1531 memset(&region_params, 0, sizeof(region_params));
1532
1533 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1534 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1535 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1536 region_params.vbios_size = adev->bios_size;
1537 region_params.fw_bss_data = region_params.bss_data_size ?
1538 adev->dm.dmub_fw->data +
1539 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1540 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1541 region_params.fw_inst_const =
1542 adev->dm.dmub_fw->data +
1543 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1544 PSP_HEADER_BYTES;
1545
1546 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1547 &region_info);
1548
1549 if (status != DMUB_STATUS_OK) {
1550 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1551 return -EINVAL;
1552 }
1553
1554 /*
1555 * Allocate a framebuffer based on the total size of all the regions.
1556 * TODO: Move this into GART.
1557 */
1558 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1559 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1560 &adev->dm.dmub_bo_gpu_addr,
1561 &adev->dm.dmub_bo_cpu_addr);
1562 if (r)
1563 return r;
1564
1565 /* Rebase the regions on the framebuffer address. */
1566 memset(&fb_params, 0, sizeof(fb_params));
1567 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1568 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1569 fb_params.region_info = &region_info;
1570
1571 adev->dm.dmub_fb_info =
1572 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1573 fb_info = adev->dm.dmub_fb_info;
1574
1575 if (!fb_info) {
1576 DRM_ERROR(
1577 "Failed to allocate framebuffer info for DMUB service!\n");
1578 return -ENOMEM;
1579 }
1580
1581 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1582 if (status != DMUB_STATUS_OK) {
1583 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1584 return -EINVAL;
1585 }
1586
1587 return 0;
1588 }
1589
1590 static int dm_sw_init(void *handle)
1591 {
1592 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1593 int r;
1594
1595 r = dm_dmub_sw_init(adev);
1596 if (r)
1597 return r;
1598
1599 return load_dmcu_fw(adev);
1600 }
1601
1602 static int dm_sw_fini(void *handle)
1603 {
1604 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1605
1606 kfree(adev->dm.dmub_fb_info);
1607 adev->dm.dmub_fb_info = NULL;
1608
1609 if (adev->dm.dmub_srv) {
1610 dmub_srv_destroy(adev->dm.dmub_srv);
1611 adev->dm.dmub_srv = NULL;
1612 }
1613
1614 release_firmware(adev->dm.dmub_fw);
1615 adev->dm.dmub_fw = NULL;
1616
1617 release_firmware(adev->dm.fw_dmcu);
1618 adev->dm.fw_dmcu = NULL;
1619
1620 return 0;
1621 }
1622
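/*
 * Start MST topology management on every connector whose DC link is an MST
 * branch; a link that fails to start is downgraded to dc_connection_single.
 */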
1623 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1624 {
1625 struct amdgpu_dm_connector *aconnector;
1626 struct drm_connector *connector;
1627 struct drm_connector_list_iter iter;
1628 int ret = 0;
1629
1630 drm_connector_list_iter_begin(dev, &iter);
1631 drm_for_each_connector_iter(connector, &iter) {
1632 aconnector = to_amdgpu_dm_connector(connector);
1633 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1634 aconnector->mst_mgr.aux) {
1635 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1636 aconnector,
1637 aconnector->base.base.id);
1638
1639 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1640 if (ret < 0) {
1641 DRM_ERROR("DM_MST: Failed to start MST\n");
1642 aconnector->dc_link->type =
1643 dc_connection_single;
1644 break;
1645 }
1646 }
1647 }
1648 drm_connector_list_iter_end(&iter);
1649
1650 return ret;
1651 }
1652
1653 static int dm_late_init(void *handle)
1654 {
1655 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1656
1657 struct dmcu_iram_parameters params;
1658 unsigned int linear_lut[16];
1659 int i;
1660 struct dmcu *dmcu = NULL;
1661 bool ret = true;
1662
1663 dmcu = adev->dm.dc->res_pool->dmcu;
1664
1665 for (i = 0; i < 16; i++)
1666 linear_lut[i] = 0xFFFF * i / 15;
1667
1668 params.set = 0;
1669 params.backlight_ramping_start = 0xCCCC;
1670 params.backlight_ramping_reduction = 0xCCCCCCCC;
1671 params.backlight_lut_array_size = 16;
1672 params.backlight_lut_array = linear_lut;
1673
1674 /* Min backlight level after ABM reduction; don't allow below 1%:
1675 * 0xFFFF x 0.01 = 0x28F
1676 */
1677 params.min_abm_backlight = 0x28F;
1678
1679 /* In the case where ABM is implemented on dmcub,
1680 * the dmcu object will be NULL.
1681 * ABM 2.4 and up are implemented on dmcub.
1682 */
1683 if (dmcu)
1684 ret = dmcu_load_iram(dmcu, params);
1685 else if (adev->dm.dc->ctx->dmub_srv)
1686 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1687
1688 if (!ret)
1689 return -EINVAL;
1690
1691 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1692 }
1693
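/*
 * Suspend or resume the MST topology managers of all root MST connectors
 * for S3. If a manager fails to resume, MST is disabled on it and a
 * hotplug event is generated so userspace re-probes the topology.
 */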
1694 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1695 {
1696 struct amdgpu_dm_connector *aconnector;
1697 struct drm_connector *connector;
1698 struct drm_connector_list_iter iter;
1699 struct drm_dp_mst_topology_mgr *mgr;
1700 int ret;
1701 bool need_hotplug = false;
1702
1703 drm_connector_list_iter_begin(dev, &iter);
1704 drm_for_each_connector_iter(connector, &iter) {
1705 aconnector = to_amdgpu_dm_connector(connector);
1706 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1707 aconnector->mst_port)
1708 continue;
1709
1710 mgr = &aconnector->mst_mgr;
1711
1712 if (suspend) {
1713 drm_dp_mst_topology_mgr_suspend(mgr);
1714 } else {
1715 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1716 if (ret < 0) {
1717 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1718 need_hotplug = true;
1719 }
1720 }
1721 }
1722 drm_connector_list_iter_end(&iter);
1723
1724 if (need_hotplug)
1725 drm_kms_helper_hotplug_event(dev);
1726 }
1727
1728 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1729 {
1730 struct smu_context *smu = &adev->smu;
1731 int ret = 0;
1732
1733 if (!is_support_sw_smu(adev))
1734 return 0;
1735
1736 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1737 * on the Windows driver dc implementation.
1738 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1739 * should be passed to smu during boot up and resume from s3.
1740 * boot up: dc calculates dcn watermark clock settings within dc_create,
1741 * dcn20_resource_construct
1742 * then calls pplib functions below to pass the settings to smu:
1743 * smu_set_watermarks_for_clock_ranges
1744 * smu_set_watermarks_table
1745 * navi10_set_watermarks_table
1746 * smu_write_watermarks_table
1747 *
1748 * For Renoir, clock settings of dcn watermarks are also fixed values.
1749 * dc has implemented a different flow for the Windows driver:
1750 * dc_hardware_init / dc_set_power_state
1751 * dcn10_init_hw
1752 * notify_wm_ranges
1753 * set_wm_ranges
1754 * -- Linux
1755 * smu_set_watermarks_for_clock_ranges
1756 * renoir_set_watermarks_table
1757 * smu_write_watermarks_table
1758 *
1759 * For Linux,
1760 * dc_hardware_init -> amdgpu_dm_init
1761 * dc_set_power_state --> dm_resume
1762 *
1763 * Therefore, this function applies to navi10/12/14 but not Renoir.
1764 */
1766 switch (adev->asic_type) {
1767 case CHIP_NAVI10:
1768 case CHIP_NAVI14:
1769 case CHIP_NAVI12:
1770 break;
1771 default:
1772 return 0;
1773 }
1774
1775 ret = smu_write_watermarks_table(smu);
1776 if (ret) {
1777 DRM_ERROR("Failed to update WMTABLE!\n");
1778 return ret;
1779 }
1780
1781 return 0;
1782 }
1783
1784 /**
1785 * dm_hw_init() - Initialize DC device
1786 * @handle: The base driver device containing the amdgpu_dm device.
1787 *
1788 * Initialize the &struct amdgpu_display_manager device. This involves calling
1789 * the initializers of each DM component, then populating the struct with them.
1790 *
1791 * Although the function implies hardware initialization, both hardware and
1792 * software are initialized here. Splitting them out to their relevant init
1793 * hooks is a future TODO item.
1794 *
1795 * Some notable things that are initialized here:
1796 *
1797 * - Display Core, both software and hardware
1798 * - DC modules that we need (freesync and color management)
1799 * - DRM software states
1800 * - Interrupt sources and handlers
1801 * - Vblank support
1802 * - Debug FS entries, if enabled
1803 */
1804 static int dm_hw_init(void *handle)
1805 {
1806 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1807 /* Create DAL display manager */
1808 amdgpu_dm_init(adev);
1809 amdgpu_dm_hpd_init(adev);
1810
1811 return 0;
1812 }
1813
1814 /**
1815 * dm_hw_fini() - Teardown DC device
1816 * @handle: The base driver device containing the amdgpu_dm device.
1817 *
1818 * Teardown components within &struct amdgpu_display_manager that require
1819 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1820 * were loaded. Also flush IRQ workqueues and disable them.
1821 */
1822 static int dm_hw_fini(void *handle)
1823 {
1824 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1825
1826 amdgpu_dm_hpd_fini(adev);
1827
1828 amdgpu_dm_irq_fini(adev);
1829 amdgpu_dm_fini(adev);
1830 return 0;
1831 }
1832
1833
1834 static int dm_enable_vblank(struct drm_crtc *crtc);
1835 static void dm_disable_vblank(struct drm_crtc *crtc);
1836
1837 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1838 struct dc_state *state, bool enable)
1839 {
1840 enum dc_irq_source irq_source;
1841 struct amdgpu_crtc *acrtc;
1842 int rc = -EBUSY;
1843 int i = 0;
1844
1845 for (i = 0; i < state->stream_count; i++) {
1846 acrtc = get_crtc_by_otg_inst(
1847 adev, state->stream_status[i].primary_otg_inst);
1848
1849 if (acrtc && state->stream_status[i].plane_count != 0) {
1850 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1851 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1852 DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1853 acrtc->crtc_id, enable ? "en" : "dis", rc);
1854 if (rc)
1855 DRM_WARN("Failed to %s pflip interrupts\n",
1856 enable ? "enable" : "disable");
1857
1858 if (enable) {
1859 rc = dm_enable_vblank(&acrtc->base);
1860 if (rc)
1861 DRM_WARN("Failed to enable vblank interrupts\n");
1862 } else {
1863 dm_disable_vblank(&acrtc->base);
1864 }
1865
1866 }
1867 }
1868
1869 }
1870
1871 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1872 {
1873 struct dc_state *context = NULL;
1874 enum dc_status res = DC_ERROR_UNEXPECTED;
1875 int i;
1876 struct dc_stream_state *del_streams[MAX_PIPES];
1877 int del_streams_count = 0;
1878
1879 memset(del_streams, 0, sizeof(del_streams));
1880
1881 context = dc_create_state(dc);
1882 if (context == NULL)
1883 goto context_alloc_fail;
1884
1885 dc_resource_state_copy_construct_current(dc, context);
1886
1887 /* First, remove all streams from the context */
1888 for (i = 0; i < context->stream_count; i++) {
1889 struct dc_stream_state *stream = context->streams[i];
1890
1891 del_streams[del_streams_count++] = stream;
1892 }
1893
1894 /* Remove all planes for removed streams and then remove the streams */
1895 for (i = 0; i < del_streams_count; i++) {
1896 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1897 res = DC_FAIL_DETACH_SURFACES;
1898 goto fail;
1899 }
1900
1901 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1902 if (res != DC_OK)
1903 goto fail;
1904 }
1905
1906
1907 res = dc_validate_global_state(dc, context, false);
1908
1909 if (res != DC_OK) {
1910 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1911 goto fail;
1912 }
1913
1914 res = dc_commit_state(dc, context);
1915
1916 fail:
1917 dc_release_state(context);
1918
1919 context_alloc_fail:
1920 return res;
1921 }
1922
1923 static int dm_suspend(void *handle)
1924 {
1925 struct amdgpu_device *adev = handle;
1926 struct amdgpu_display_manager *dm = &adev->dm;
1927 int ret = 0;
1928
1929 if (amdgpu_in_reset(adev)) {
1930 mutex_lock(&dm->dc_lock);
1931
1932 #if defined(CONFIG_DRM_AMD_DC_DCN)
1933 dc_allow_idle_optimizations(adev->dm.dc, false);
1934 #endif
1935
1936 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1937
1938 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1939
1940 amdgpu_dm_commit_zero_streams(dm->dc);
1941
1942 amdgpu_dm_irq_suspend(adev);
1943
1944 return ret;
1945 }
1946
1947 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1948 amdgpu_dm_crtc_secure_display_suspend(adev);
1949 #endif
1950 WARN_ON(adev->dm.cached_state);
1951 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1952
1953 s3_handle_mst(adev_to_drm(adev), true);
1954
1955 amdgpu_dm_irq_suspend(adev);
1956
1957
1958 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1959
1960 return 0;
1961 }
1962
1963 static struct amdgpu_dm_connector *
1964 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1965 struct drm_crtc *crtc)
1966 {
1967 uint32_t i;
1968 struct drm_connector_state *new_con_state;
1969 struct drm_connector *connector;
1970 struct drm_crtc *crtc_from_state;
1971
1972 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1973 crtc_from_state = new_con_state->crtc;
1974
1975 if (crtc_from_state == crtc)
1976 return to_amdgpu_dm_connector(connector);
1977 }
1978
1979 return NULL;
1980 }
1981
1982 static void emulated_link_detect(struct dc_link *link)
1983 {
1984 struct dc_sink_init_data sink_init_data = { 0 };
1985 struct display_sink_capability sink_caps = { 0 };
1986 enum dc_edid_status edid_status;
1987 struct dc_context *dc_ctx = link->ctx;
1988 struct dc_sink *sink = NULL;
1989 struct dc_sink *prev_sink = NULL;
1990
1991 link->type = dc_connection_none;
1992 prev_sink = link->local_sink;
1993
1994 if (prev_sink)
1995 dc_sink_release(prev_sink);
1996
1997 switch (link->connector_signal) {
1998 case SIGNAL_TYPE_HDMI_TYPE_A: {
1999 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2000 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2001 break;
2002 }
2003
2004 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2005 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2006 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2007 break;
2008 }
2009
2010 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2011 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2012 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2013 break;
2014 }
2015
2016 case SIGNAL_TYPE_LVDS: {
2017 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2018 sink_caps.signal = SIGNAL_TYPE_LVDS;
2019 break;
2020 }
2021
2022 case SIGNAL_TYPE_EDP: {
2023 sink_caps.transaction_type =
2024 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2025 sink_caps.signal = SIGNAL_TYPE_EDP;
2026 break;
2027 }
2028
2029 case SIGNAL_TYPE_DISPLAY_PORT: {
2030 sink_caps.transaction_type =
2031 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2032 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2033 break;
2034 }
2035
2036 default:
2037 DC_ERROR("Invalid connector type! signal:%d\n",
2038 link->connector_signal);
2039 return;
2040 }
2041
2042 sink_init_data.link = link;
2043 sink_init_data.sink_signal = sink_caps.signal;
2044
2045 sink = dc_sink_create(&sink_init_data);
2046 if (!sink) {
2047 DC_ERROR("Failed to create sink!\n");
2048 return;
2049 }
2050
2051 /* dc_sink_create returns a new reference */
2052 link->local_sink = sink;
2053
2054 edid_status = dm_helpers_read_local_edid(
2055 link->ctx,
2056 link,
2057 sink);
2058
2059 if (edid_status != EDID_OK)
2060 DC_ERROR("Failed to read EDID\n");
2061
2062 }
2063
2064 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2065 struct amdgpu_display_manager *dm)
2066 {
2067 struct {
2068 struct dc_surface_update surface_updates[MAX_SURFACES];
2069 struct dc_plane_info plane_infos[MAX_SURFACES];
2070 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2071 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2072 struct dc_stream_update stream_update;
2073 } * bundle;
2074 int k, m;
2075
2076 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2077
2078 if (!bundle) {
2079 dm_error("Failed to allocate update bundle\n");
2080 goto cleanup;
2081 }
2082
2083 for (k = 0; k < dc_state->stream_count; k++) {
2084 bundle->stream_update.stream = dc_state->streams[k];
2085
2086 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2087 bundle->surface_updates[m].surface =
2088 dc_state->stream_status->plane_states[m];
2089 bundle->surface_updates[m].surface->force_full_update =
2090 true;
2091 }
2092 dc_commit_updates_for_stream(
2093 dm->dc, bundle->surface_updates,
2094 dc_state->stream_status->plane_count,
2095 dc_state->streams[k], &bundle->stream_update, dc_state);
2096 }
2097
2098 cleanup:
2099 kfree(bundle);
2100
2101 return;
2102 }
2103
2104 static void dm_set_dpms_off(struct dc_link *link)
2105 {
2106 struct dc_stream_state *stream_state;
2107 struct amdgpu_dm_connector *aconnector = link->priv;
2108 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2109 struct dc_stream_update stream_update;
2110 bool dpms_off = true;
2111
2112 memset(&stream_update, 0, sizeof(stream_update));
2113 stream_update.dpms_off = &dpms_off;
2114
2115 mutex_lock(&adev->dm.dc_lock);
2116 stream_state = dc_stream_find_from_link(link);
2117
2118 if (stream_state == NULL) {
2119 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2120 mutex_unlock(&adev->dm.dc_lock);
2121 return;
2122 }
2123
2124 stream_update.stream = stream_state;
2125 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2126 stream_state, &stream_update,
2127 stream_state->ctx->dc->current_state);
2128 mutex_unlock(&adev->dm.dc_lock);
2129 }
2130
2131 static int dm_resume(void *handle)
2132 {
2133 struct amdgpu_device *adev = handle;
2134 struct drm_device *ddev = adev_to_drm(adev);
2135 struct amdgpu_display_manager *dm = &adev->dm;
2136 struct amdgpu_dm_connector *aconnector;
2137 struct drm_connector *connector;
2138 struct drm_connector_list_iter iter;
2139 struct drm_crtc *crtc;
2140 struct drm_crtc_state *new_crtc_state;
2141 struct dm_crtc_state *dm_new_crtc_state;
2142 struct drm_plane *plane;
2143 struct drm_plane_state *new_plane_state;
2144 struct dm_plane_state *dm_new_plane_state;
2145 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2146 enum dc_connection_type new_connection_type = dc_connection_none;
2147 struct dc_state *dc_state;
2148 int i, r, j;
2149
2150 if (amdgpu_in_reset(adev)) {
2151 dc_state = dm->cached_dc_state;
2152
2153 r = dm_dmub_hw_init(adev);
2154 if (r)
2155 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2156
2157 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2158 dc_resume(dm->dc);
2159
2160 amdgpu_dm_irq_resume_early(adev);
2161
2162 for (i = 0; i < dc_state->stream_count; i++) {
2163 dc_state->streams[i]->mode_changed = true;
2164 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2165 dc_state->stream_status->plane_states[j]->update_flags.raw
2166 = 0xffffffff;
2167 }
2168 }
2169
2170 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2171
2172 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2173
2174 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2175
2176 dc_release_state(dm->cached_dc_state);
2177 dm->cached_dc_state = NULL;
2178
2179 amdgpu_dm_irq_resume_late(adev);
2180
2181 mutex_unlock(&dm->dc_lock);
2182
2183 return 0;
2184 }
2185 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2186 dc_release_state(dm_state->context);
2187 dm_state->context = dc_create_state(dm->dc);
2188 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2189 dc_resource_state_construct(dm->dc, dm_state->context);
2190
2191 /* Before powering on DC we need to re-initialize DMUB. */
2192 r = dm_dmub_hw_init(adev);
2193 if (r)
2194 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2195
2196 /* power on hardware */
2197 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2198
2199 /* program HPD filter */
2200 dc_resume(dm->dc);
2201
2202 /*
2203 * Enable HPD Rx IRQ early; this should be done before setting the mode,
2204 * as short pulse interrupts are used for MST
2205 */
2206 amdgpu_dm_irq_resume_early(adev);
2207
2208 /* On resume we need to rewrite the MSTM control bits to enable MST */
2209 s3_handle_mst(ddev, false);
2210
2211 /* Do detection */
2212 drm_connector_list_iter_begin(ddev, &iter);
2213 drm_for_each_connector_iter(connector, &iter) {
2214 aconnector = to_amdgpu_dm_connector(connector);
2215
2216 /*
2217 * This is the case when traversing through already created
2218 * MST connectors; they should be skipped.
2219 */
2220 if (aconnector->mst_port)
2221 continue;
2222
2223 mutex_lock(&aconnector->hpd_lock);
2224 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2225 DRM_ERROR("KMS: Failed to detect connector\n");
2226
2227 if (aconnector->base.force && new_connection_type == dc_connection_none)
2228 emulated_link_detect(aconnector->dc_link);
2229 else
2230 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2231
2232 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2233 aconnector->fake_enable = false;
2234
2235 if (aconnector->dc_sink)
2236 dc_sink_release(aconnector->dc_sink);
2237 aconnector->dc_sink = NULL;
2238 amdgpu_dm_update_connector_after_detect(aconnector);
2239 mutex_unlock(&aconnector->hpd_lock);
2240 }
2241 drm_connector_list_iter_end(&iter);
2242
2243 /* Force mode set in atomic commit */
2244 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2245 new_crtc_state->active_changed = true;
2246
2247 /*
2248 * atomic_check is expected to create the dc states. We need to release
2249 * them here, since they were duplicated as part of the suspend
2250 * procedure.
2251 */
2252 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2253 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2254 if (dm_new_crtc_state->stream) {
2255 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2256 dc_stream_release(dm_new_crtc_state->stream);
2257 dm_new_crtc_state->stream = NULL;
2258 }
2259 }
2260
2261 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2262 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2263 if (dm_new_plane_state->dc_state) {
2264 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2265 dc_plane_state_release(dm_new_plane_state->dc_state);
2266 dm_new_plane_state->dc_state = NULL;
2267 }
2268 }
2269
2270 drm_atomic_helper_resume(ddev, dm->cached_state);
2271
2272 dm->cached_state = NULL;
2273
2274 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2275 amdgpu_dm_crtc_secure_display_resume(adev);
2276 #endif
2277
2278 amdgpu_dm_irq_resume_late(adev);
2279
2280 amdgpu_dm_smu_write_watermarks_table(adev);
2281
2282 return 0;
2283 }
2284
2285 /**
2286 * DOC: DM Lifecycle
2287 *
2288 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2289 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2290 * the base driver's device list to be initialized and torn down accordingly.
2291 *
2292 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2293 */
2294
2295 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2296 .name = "dm",
2297 .early_init = dm_early_init,
2298 .late_init = dm_late_init,
2299 .sw_init = dm_sw_init,
2300 .sw_fini = dm_sw_fini,
2301 .hw_init = dm_hw_init,
2302 .hw_fini = dm_hw_fini,
2303 .suspend = dm_suspend,
2304 .resume = dm_resume,
2305 .is_idle = dm_is_idle,
2306 .wait_for_idle = dm_wait_for_idle,
2307 .check_soft_reset = dm_check_soft_reset,
2308 .soft_reset = dm_soft_reset,
2309 .set_clockgating_state = dm_set_clockgating_state,
2310 .set_powergating_state = dm_set_powergating_state,
2311 };
2312
2313 const struct amdgpu_ip_block_version dm_ip_block =
2314 {
2315 .type = AMD_IP_BLOCK_TYPE_DCE,
2316 .major = 1,
2317 .minor = 0,
2318 .rev = 0,
2319 .funcs = &amdgpu_dm_funcs,
2320 };
2321
2322
2323 /**
2324 * DOC: atomic
2325 *
2326 * *WIP*
2327 */
2328
2329 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2330 .fb_create = amdgpu_display_user_framebuffer_create,
2331 .get_format_info = amd_get_format_info,
2332 .output_poll_changed = drm_fb_helper_output_poll_changed,
2333 .atomic_check = amdgpu_dm_atomic_check,
2334 .atomic_commit = drm_atomic_helper_commit,
2335 };
2336
2337 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2338 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2339 };
2340
2341 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2342 {
2343 u32 max_cll, min_cll, max, min, q, r;
2344 struct amdgpu_dm_backlight_caps *caps;
2345 struct amdgpu_display_manager *dm;
2346 struct drm_connector *conn_base;
2347 struct amdgpu_device *adev;
2348 struct dc_link *link = NULL;
2349 static const u8 pre_computed_values[] = {
2350 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2351 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2352
2353 if (!aconnector || !aconnector->dc_link)
2354 return;
2355
2356 link = aconnector->dc_link;
2357 if (link->connector_signal != SIGNAL_TYPE_EDP)
2358 return;
2359
2360 conn_base = &aconnector->base;
2361 adev = drm_to_adev(conn_base->dev);
2362 dm = &adev->dm;
2363 caps = &dm->backlight_caps;
2364 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2365 caps->aux_support = false;
2366 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2367 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2368
2369 if (caps->ext_caps->bits.oled == 1 ||
2370 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2371 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2372 caps->aux_support = true;
2373
2374 if (amdgpu_backlight == 0)
2375 caps->aux_support = false;
2376 else if (amdgpu_backlight == 1)
2377 caps->aux_support = true;
2378
2379 /* From the specification (CTA-861-G), for calculating the maximum
2380 * luminance we need to use:
2381 * Luminance = 50*2**(CV/32)
2382 * where CV is a one-byte value.
2383 * Calculating this expression would require floating point precision;
2384 * to avoid that complexity, we take advantage of the fact that CV is
2385 * divided by a constant. From Euclid's division algorithm, we know that
2386 * CV can be written as CV = 32*q + r. Next, we replace CV in the
2387 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2388 * need to pre-compute the values of 50*(2**(r/32)). For pre-computing
2389 * them we used the following Ruby line:
2390 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2391 * The results of the above expression can be verified against
2392 * pre_computed_values.
2393 */
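/* As a numerical sanity check (illustrative values, not from the spec):
 * max_cll = 65 gives q = 2 and r = 1, so max becomes
 * (1 << 2) * pre_computed_values[1] = 4 * 51 = 204, close to the exact
 * 50*2**(65/32) ~= 204.4 nits.
 */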
2394 q = max_cll >> 5;
2395 r = max_cll % 32;
2396 max = (1 << q) * pre_computed_values[r];
2397
2398 // min luminance: maxLum * (CV/255)^2 / 100
2399 q = DIV_ROUND_CLOSEST(min_cll, 255);
2400 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2401
2402 caps->aux_max_input_signal = max;
2403 caps->aux_min_input_signal = min;
2404 }
2405
2406 void amdgpu_dm_update_connector_after_detect(
2407 struct amdgpu_dm_connector *aconnector)
2408 {
2409 struct drm_connector *connector = &aconnector->base;
2410 struct drm_device *dev = connector->dev;
2411 struct dc_sink *sink;
2412
2413 /* MST handled by drm_mst framework */
2414 if (aconnector->mst_mgr.mst_state)
2415 return;
2416
2417 sink = aconnector->dc_link->local_sink;
2418 if (sink)
2419 dc_sink_retain(sink);
2420
2421 /*
2422 * The EDID mgmt connector gets its first update only in the mode_valid hook,
2423 * and then the connector sink is set to either a fake or a physical sink, depending on link status.
2424 * Skip if already done during boot.
2425 */
2426 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2427 && aconnector->dc_em_sink) {
2428
2429 /*
2430 * For headless S3 resume, use the emulated sink (dc_em_sink) to fake a
2431 * stream, because on resume connector->sink is set to NULL
2432 */
2433 mutex_lock(&dev->mode_config.mutex);
2434
2435 if (sink) {
2436 if (aconnector->dc_sink) {
2437 amdgpu_dm_update_freesync_caps(connector, NULL);
2438 /*
2439 * The retain and release below are used to
2440 * bump up the refcount for the sink because the link doesn't point
2441 * to it anymore after disconnect; otherwise, on the next crtc-to-connector
2442 * reshuffle by UMD we would get an unwanted dc_sink release
2443 */
2444 dc_sink_release(aconnector->dc_sink);
2445 }
2446 aconnector->dc_sink = sink;
2447 dc_sink_retain(aconnector->dc_sink);
2448 amdgpu_dm_update_freesync_caps(connector,
2449 aconnector->edid);
2450 } else {
2451 amdgpu_dm_update_freesync_caps(connector, NULL);
2452 if (!aconnector->dc_sink) {
2453 aconnector->dc_sink = aconnector->dc_em_sink;
2454 dc_sink_retain(aconnector->dc_sink);
2455 }
2456 }
2457
2458 mutex_unlock(&dev->mode_config.mutex);
2459
2460 if (sink)
2461 dc_sink_release(sink);
2462 return;
2463 }
2464
2465 /*
2466 * TODO: temporary guard until a proper fix is found;
2467 * if this sink is an MST sink, we should not do anything
2468 */
2469 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2470 dc_sink_release(sink);
2471 return;
2472 }
2473
2474 if (aconnector->dc_sink == sink) {
2475 /*
2476 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2477 * Do nothing!!
2478 */
2479 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2480 aconnector->connector_id);
2481 if (sink)
2482 dc_sink_release(sink);
2483 return;
2484 }
2485
2486 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2487 aconnector->connector_id, aconnector->dc_sink, sink);
2488
2489 mutex_lock(&dev->mode_config.mutex);
2490
2491 /*
2492 * 1. Update status of the drm connector
2493 * 2. Send an event and let userspace tell us what to do
2494 */
2495 if (sink) {
2496 /*
2497 * TODO: check if we still need the S3 mode update workaround.
2498 * If yes, put it here.
2499 */
2500 if (aconnector->dc_sink) {
2501 amdgpu_dm_update_freesync_caps(connector, NULL);
2502 dc_sink_release(aconnector->dc_sink);
2503 }
2504
2505 aconnector->dc_sink = sink;
2506 dc_sink_retain(aconnector->dc_sink);
2507 if (sink->dc_edid.length == 0) {
2508 aconnector->edid = NULL;
2509 if (aconnector->dc_link->aux_mode) {
2510 drm_dp_cec_unset_edid(
2511 &aconnector->dm_dp_aux.aux);
2512 }
2513 } else {
2514 aconnector->edid =
2515 (struct edid *)sink->dc_edid.raw_edid;
2516
2517 drm_connector_update_edid_property(connector,
2518 aconnector->edid);
2519 if (aconnector->dc_link->aux_mode)
2520 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2521 aconnector->edid);
2522 }
2523
2524 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2525 update_connector_ext_caps(aconnector);
2526 } else {
2527 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2528 amdgpu_dm_update_freesync_caps(connector, NULL);
2529 drm_connector_update_edid_property(connector, NULL);
2530 aconnector->num_modes = 0;
2531 dc_sink_release(aconnector->dc_sink);
2532 aconnector->dc_sink = NULL;
2533 aconnector->edid = NULL;
2534 #ifdef CONFIG_DRM_AMD_DC_HDCP
2535 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2536 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2537 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2538 #endif
2539 }
2540
2541 mutex_unlock(&dev->mode_config.mutex);
2542
2543 update_subconnector_property(aconnector);
2544
2545 if (sink)
2546 dc_sink_release(sink);
2547 }
2548
2549 static void handle_hpd_irq(void *param)
2550 {
2551 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2552 struct drm_connector *connector = &aconnector->base;
2553 struct drm_device *dev = connector->dev;
2554 enum dc_connection_type new_connection_type = dc_connection_none;
2555 struct amdgpu_device *adev = drm_to_adev(dev);
2556 #ifdef CONFIG_DRM_AMD_DC_HDCP
2557 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2558 #endif
2559
2560 if (adev->dm.disable_hpd_irq)
2561 return;
2562
2563 /*
2564 * In case of failure or MST, there is no need to update the connector status
2565 * or notify the OS, since (in the MST case) MST does this in its own context.
2566 */
2567 mutex_lock(&aconnector->hpd_lock);
2568
2569 #ifdef CONFIG_DRM_AMD_DC_HDCP
2570 if (adev->dm.hdcp_workqueue) {
2571 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2572 dm_con_state->update_hdcp = true;
2573 }
2574 #endif
2575 if (aconnector->fake_enable)
2576 aconnector->fake_enable = false;
2577
2578 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2579 DRM_ERROR("KMS: Failed to detect connector\n");
2580
2581 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2582 emulated_link_detect(aconnector->dc_link);
2583
2584
2585 drm_modeset_lock_all(dev);
2586 dm_restore_drm_connector_state(dev, connector);
2587 drm_modeset_unlock_all(dev);
2588
2589 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2590 drm_kms_helper_hotplug_event(dev);
2591
2592 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2593 if (new_connection_type == dc_connection_none &&
2594 aconnector->dc_link->type == dc_connection_none)
2595 dm_set_dpms_off(aconnector->dc_link);
2596
2597 amdgpu_dm_update_connector_after_detect(aconnector);
2598
2599 drm_modeset_lock_all(dev);
2600 dm_restore_drm_connector_state(dev, connector);
2601 drm_modeset_unlock_all(dev);
2602
2603 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2604 drm_kms_helper_hotplug_event(dev);
2605 }
2606 mutex_unlock(&aconnector->hpd_lock);
2607
2608 }
2609
2610 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2611 {
2612 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2613 uint8_t dret;
2614 bool new_irq_handled = false;
2615 int dpcd_addr;
2616 int dpcd_bytes_to_read;
2617
2618 const int max_process_count = 30;
2619 int process_count = 0;
2620
2621 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2622
2623 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2624 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2625 /* DPCD 0x200 - 0x201 for downstream IRQ */
2626 dpcd_addr = DP_SINK_COUNT;
2627 } else {
2628 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2629 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2630 dpcd_addr = DP_SINK_COUNT_ESI;
2631 }
2632
2633 dret = drm_dp_dpcd_read(
2634 &aconnector->dm_dp_aux.aux,
2635 dpcd_addr,
2636 esi,
2637 dpcd_bytes_to_read);
2638
2639 while (dret == dpcd_bytes_to_read &&
2640 process_count < max_process_count) {
2641 uint8_t retry;
2642 dret = 0;
2643
2644 process_count++;
2645
2646 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2647 /* handle HPD short pulse irq */
2648 if (aconnector->mst_mgr.mst_state)
2649 drm_dp_mst_hpd_irq(
2650 &aconnector->mst_mgr,
2651 esi,
2652 &new_irq_handled);
2653
2654 if (new_irq_handled) {
2655 /* ACK at DPCD to notify downstream */
2656 const int ack_dpcd_bytes_to_write =
2657 dpcd_bytes_to_read - 1;
2658
2659 for (retry = 0; retry < 3; retry++) {
2660 uint8_t wret;
2661
2662 wret = drm_dp_dpcd_write(
2663 &aconnector->dm_dp_aux.aux,
2664 dpcd_addr + 1,
2665 &esi[1],
2666 ack_dpcd_bytes_to_write);
2667 if (wret == ack_dpcd_bytes_to_write)
2668 break;
2669 }
2670
2671 /* check if there is new irq to be handled */
2672 dret = drm_dp_dpcd_read(
2673 &aconnector->dm_dp_aux.aux,
2674 dpcd_addr,
2675 esi,
2676 dpcd_bytes_to_read);
2677
2678 new_irq_handled = false;
2679 } else {
2680 break;
2681 }
2682 }
2683
2684 if (process_count == max_process_count)
2685 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2686 }
2687
2688 static void handle_hpd_rx_irq(void *param)
2689 {
2690 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2691 struct drm_connector *connector = &aconnector->base;
2692 struct drm_device *dev = connector->dev;
2693 struct dc_link *dc_link = aconnector->dc_link;
2694 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2695 bool result = false;
2696 enum dc_connection_type new_connection_type = dc_connection_none;
2697 struct amdgpu_device *adev = drm_to_adev(dev);
2698 union hpd_irq_data hpd_irq_data;
2699
2700 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2701
2702 if (adev->dm.disable_hpd_irq)
2703 return;
2704
2705
2706 /*
2707 * TODO: Temporarily add a mutex to protect the HPD interrupt from a GPIO
2708 * conflict; once the i2c helper is implemented, this mutex should be
2709 * retired.
2710 */
2711 if (dc_link->type != dc_connection_mst_branch)
2712 mutex_lock(&aconnector->hpd_lock);
2713
2714 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2715
2716 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2717 (dc_link->type == dc_connection_mst_branch)) {
2718 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2719 result = true;
2720 dm_handle_hpd_rx_irq(aconnector);
2721 goto out;
2722 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2723 result = false;
2724 dm_handle_hpd_rx_irq(aconnector);
2725 goto out;
2726 }
2727 }
2728
2729 mutex_lock(&adev->dm.dc_lock);
2730 #ifdef CONFIG_DRM_AMD_DC_HDCP
2731 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2732 #else
2733 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2734 #endif
2735 mutex_unlock(&adev->dm.dc_lock);
2736
2737 out:
2738 if (result && !is_mst_root_connector) {
2739 /* Downstream Port status changed. */
2740 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2741 DRM_ERROR("KMS: Failed to detect connector\n");
2742
2743 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2744 emulated_link_detect(dc_link);
2745
2746 if (aconnector->fake_enable)
2747 aconnector->fake_enable = false;
2748
2749 amdgpu_dm_update_connector_after_detect(aconnector);
2750
2751
2752 drm_modeset_lock_all(dev);
2753 dm_restore_drm_connector_state(dev, connector);
2754 drm_modeset_unlock_all(dev);
2755
2756 drm_kms_helper_hotplug_event(dev);
2757 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2758
2759 if (aconnector->fake_enable)
2760 aconnector->fake_enable = false;
2761
2762 amdgpu_dm_update_connector_after_detect(aconnector);
2763
2764
2765 drm_modeset_lock_all(dev);
2766 dm_restore_drm_connector_state(dev, connector);
2767 drm_modeset_unlock_all(dev);
2768
2769 drm_kms_helper_hotplug_event(dev);
2770 }
2771 }
2772 #ifdef CONFIG_DRM_AMD_DC_HDCP
2773 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2774 if (adev->dm.hdcp_workqueue)
2775 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2776 }
2777 #endif
2778
2779 if (dc_link->type != dc_connection_mst_branch) {
2780 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2781 mutex_unlock(&aconnector->hpd_lock);
2782 }
2783 }
2784
2785 static void register_hpd_handlers(struct amdgpu_device *adev)
2786 {
2787 struct drm_device *dev = adev_to_drm(adev);
2788 struct drm_connector *connector;
2789 struct amdgpu_dm_connector *aconnector;
2790 const struct dc_link *dc_link;
2791 struct dc_interrupt_params int_params = {0};
2792
2793 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2794 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2795
2796 list_for_each_entry(connector,
2797 &dev->mode_config.connector_list, head) {
2798
2799 aconnector = to_amdgpu_dm_connector(connector);
2800 dc_link = aconnector->dc_link;
2801
2802 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2803 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2804 int_params.irq_source = dc_link->irq_source_hpd;
2805
2806 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2807 handle_hpd_irq,
2808 (void *) aconnector);
2809 }
2810
2811 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2812
2813 /* Also register for DP short pulse (hpd_rx). */
2814 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2815 int_params.irq_source = dc_link->irq_source_hpd_rx;
2816
2817 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2818 handle_hpd_rx_irq,
2819 (void *) aconnector);
2820 }
2821 }
2822 }
2823
2824 #if defined(CONFIG_DRM_AMD_DC_SI)
2825 /* Register IRQ sources and initialize IRQ callbacks */
2826 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2827 {
2828 struct dc *dc = adev->dm.dc;
2829 struct common_irq_params *c_irq_params;
2830 struct dc_interrupt_params int_params = {0};
2831 int r;
2832 int i;
2833 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2834
2835 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2836 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2837
2838 /*
2839 * Actions of amdgpu_irq_add_id():
2840 * 1. Register a set() function with base driver.
2841 * Base driver will call set() function to enable/disable an
2842 * interrupt in DC hardware.
2843 * 2. Register amdgpu_dm_irq_handler().
2844 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2845 * coming from DC hardware.
2846 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2847 * for acknowledging and handling. */
2848
2849 /* Use VBLANK interrupt */
2850 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2851 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2852 if (r) {
2853 DRM_ERROR("Failed to add crtc irq id!\n");
2854 return r;
2855 }
2856
2857 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2858 int_params.irq_source =
2859 dc_interrupt_to_irq_source(dc, i+1 , 0);
2860
2861 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2862
2863 c_irq_params->adev = adev;
2864 c_irq_params->irq_src = int_params.irq_source;
2865
2866 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2867 dm_crtc_high_irq, c_irq_params);
2868 }
2869
2870 /* Use GRPH_PFLIP interrupt */
2871 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2872 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2873 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2874 if (r) {
2875 DRM_ERROR("Failed to add page flip irq id!\n");
2876 return r;
2877 }
2878
2879 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2880 int_params.irq_source =
2881 dc_interrupt_to_irq_source(dc, i, 0);
2882
2883 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2884
2885 c_irq_params->adev = adev;
2886 c_irq_params->irq_src = int_params.irq_source;
2887
2888 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2889 dm_pflip_high_irq, c_irq_params);
2890
2891 }
2892
2893 /* HPD */
2894 r = amdgpu_irq_add_id(adev, client_id,
2895 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2896 if (r) {
2897 DRM_ERROR("Failed to add hpd irq id!\n");
2898 return r;
2899 }
2900
2901 register_hpd_handlers(adev);
2902
2903 return 0;
2904 }
2905 #endif
2906
2907 /* Register IRQ sources and initialize IRQ callbacks */
2908 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2909 {
2910 struct dc *dc = adev->dm.dc;
2911 struct common_irq_params *c_irq_params;
2912 struct dc_interrupt_params int_params = {0};
2913 int r;
2914 int i;
2915 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2916
2917 if (adev->asic_type >= CHIP_VEGA10)
2918 client_id = SOC15_IH_CLIENTID_DCE;
2919
2920 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2921 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2922
2923 /*
2924 * Actions of amdgpu_irq_add_id():
2925 * 1. Register a set() function with base driver.
2926 * Base driver will call set() function to enable/disable an
2927 * interrupt in DC hardware.
2928 * 2. Register amdgpu_dm_irq_handler().
2929 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2930 * coming from DC hardware.
2931 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2932 * for acknowledging and handling. */
2933
2934 /* Use VBLANK interrupt */
2935 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2936 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2937 if (r) {
2938 DRM_ERROR("Failed to add crtc irq id!\n");
2939 return r;
2940 }
2941
2942 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2943 int_params.irq_source =
2944 dc_interrupt_to_irq_source(dc, i, 0);
2945
2946 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2947
2948 c_irq_params->adev = adev;
2949 c_irq_params->irq_src = int_params.irq_source;
2950
2951 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2952 dm_crtc_high_irq, c_irq_params);
2953 }
2954
2955 /* Use VUPDATE interrupt */
2956 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2957 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2958 if (r) {
2959 DRM_ERROR("Failed to add vupdate irq id!\n");
2960 return r;
2961 }
2962
2963 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2964 int_params.irq_source =
2965 dc_interrupt_to_irq_source(dc, i, 0);
2966
2967 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2968
2969 c_irq_params->adev = adev;
2970 c_irq_params->irq_src = int_params.irq_source;
2971
2972 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2973 dm_vupdate_high_irq, c_irq_params);
2974 }
2975
2976 /* Use GRPH_PFLIP interrupt */
2977 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2978 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2979 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2980 if (r) {
2981 DRM_ERROR("Failed to add page flip irq id!\n");
2982 return r;
2983 }
2984
2985 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2986 int_params.irq_source =
2987 dc_interrupt_to_irq_source(dc, i, 0);
2988
2989 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2990
2991 c_irq_params->adev = adev;
2992 c_irq_params->irq_src = int_params.irq_source;
2993
2994 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2995 dm_pflip_high_irq, c_irq_params);
2996
2997 }
2998
2999 /* HPD */
3000 r = amdgpu_irq_add_id(adev, client_id,
3001 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3002 if (r) {
3003 DRM_ERROR("Failed to add hpd irq id!\n");
3004 return r;
3005 }
3006
3007 register_hpd_handlers(adev);
3008
3009 return 0;
3010 }
3011
3012 #if defined(CONFIG_DRM_AMD_DC_DCN)
3013 /* Register IRQ sources and initialize IRQ callbacks */
3014 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3015 {
3016 struct dc *dc = adev->dm.dc;
3017 struct common_irq_params *c_irq_params;
3018 struct dc_interrupt_params int_params = {0};
3019 int r;
3020 int i;
3021 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3022 static const unsigned int vrtl_int_srcid[] = {
3023 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3024 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3025 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3026 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3027 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3028 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3029 };
3030 #endif
3031
3032 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3033 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3034
3035 /*
3036 * Actions of amdgpu_irq_add_id():
3037 * 1. Register a set() function with base driver.
3038 * Base driver will call set() function to enable/disable an
3039 * interrupt in DC hardware.
3040 * 2. Register amdgpu_dm_irq_handler().
3041 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3042 * coming from DC hardware.
3043 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3044 * for acknowledging and handling.
3045 */
3046
3047 /* Use VSTARTUP interrupt */
3048 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3049 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3050 i++) {
3051 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3052
3053 if (r) {
3054 DRM_ERROR("Failed to add crtc irq id!\n");
3055 return r;
3056 }
3057
3058 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3059 int_params.irq_source =
3060 dc_interrupt_to_irq_source(dc, i, 0);
3061
3062 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3063
3064 c_irq_params->adev = adev;
3065 c_irq_params->irq_src = int_params.irq_source;
3066
3067 amdgpu_dm_irq_register_interrupt(
3068 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3069 }
3070
3071 /* Use otg vertical line interrupt */
3072 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3073 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3074 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3075 vrtl_int_srcid[i], &adev->vline0_irq);
3076
3077 if (r) {
3078 DRM_ERROR("Failed to add vline0 irq id!\n");
3079 return r;
3080 }
3081
3082 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3083 int_params.irq_source =
3084 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3085
3086 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3087 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3088 break;
3089 }
3090
3091 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3092 - DC_IRQ_SOURCE_DC1_VLINE0];
3093
3094 c_irq_params->adev = adev;
3095 c_irq_params->irq_src = int_params.irq_source;
3096
3097 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3098 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3099 }
3100 #endif
3101
3102 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3103 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3104 * to trigger at end of each vblank, regardless of state of the lock,
3105 * matching DCE behaviour.
3106 */
3107 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3108 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3109 i++) {
3110 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3111
3112 if (r) {
3113 DRM_ERROR("Failed to add vupdate irq id!\n");
3114 return r;
3115 }
3116
3117 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3118 int_params.irq_source =
3119 dc_interrupt_to_irq_source(dc, i, 0);
3120
3121 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3122
3123 c_irq_params->adev = adev;
3124 c_irq_params->irq_src = int_params.irq_source;
3125
3126 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3127 dm_vupdate_high_irq, c_irq_params);
3128 }
3129
3130 /* Use GRPH_PFLIP interrupt */
3131 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3132 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3133 i++) {
3134 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3135 if (r) {
3136 DRM_ERROR("Failed to add page flip irq id!\n");
3137 return r;
3138 }
3139
3140 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3141 int_params.irq_source =
3142 dc_interrupt_to_irq_source(dc, i, 0);
3143
3144 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3145
3146 c_irq_params->adev = adev;
3147 c_irq_params->irq_src = int_params.irq_source;
3148
3149 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3150 dm_pflip_high_irq, c_irq_params);
3151
3152 }
3153
3154 if (dc->ctx->dmub_srv) {
3155 i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
3156 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
3157
3158 if (r) {
3159 DRM_ERROR("Failed to add dmub trace irq id!\n");
3160 return r;
3161 }
3162
3163 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3164 int_params.irq_source =
3165 dc_interrupt_to_irq_source(dc, i, 0);
3166
3167 c_irq_params = &adev->dm.dmub_trace_params[0];
3168
3169 c_irq_params->adev = adev;
3170 c_irq_params->irq_src = int_params.irq_source;
3171
3172 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3173 dm_dmub_trace_high_irq, c_irq_params);
3174 }
3175
3176 /* HPD */
3177 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3178 &adev->hpd_irq);
3179 if (r) {
3180 DRM_ERROR("Failed to add hpd irq id!\n");
3181 return r;
3182 }
3183
3184 register_hpd_handlers(adev);
3185
3186 return 0;
3187 }
3188 #endif
3189
3190 /*
3191 * Acquires the lock for the atomic state object and returns
3192 * the new atomic state.
3193 *
3194 * This should only be called during atomic check.
3195 */
3196 static int dm_atomic_get_state(struct drm_atomic_state *state,
3197 struct dm_atomic_state **dm_state)
3198 {
3199 struct drm_device *dev = state->dev;
3200 struct amdgpu_device *adev = drm_to_adev(dev);
3201 struct amdgpu_display_manager *dm = &adev->dm;
3202 struct drm_private_state *priv_state;
3203
3204 if (*dm_state)
3205 return 0;
3206
3207 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3208 if (IS_ERR(priv_state))
3209 return PTR_ERR(priv_state);
3210
3211 *dm_state = to_dm_atomic_state(priv_state);
3212
3213 return 0;
3214 }
3215
3216 static struct dm_atomic_state *
3217 dm_atomic_get_new_state(struct drm_atomic_state *state)
3218 {
3219 struct drm_device *dev = state->dev;
3220 struct amdgpu_device *adev = drm_to_adev(dev);
3221 struct amdgpu_display_manager *dm = &adev->dm;
3222 struct drm_private_obj *obj;
3223 struct drm_private_state *new_obj_state;
3224 int i;
3225
3226 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3227 if (obj->funcs == dm->atomic_obj.funcs)
3228 return to_dm_atomic_state(new_obj_state);
3229 }
3230
3231 return NULL;
3232 }
3233
3234 static struct drm_private_state *
3235 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3236 {
3237 struct dm_atomic_state *old_state, *new_state;
3238
3239 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3240 if (!new_state)
3241 return NULL;
3242
3243 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3244
3245 old_state = to_dm_atomic_state(obj->state);
3246
3247 if (old_state && old_state->context)
3248 new_state->context = dc_copy_state(old_state->context);
3249
3250 if (!new_state->context) {
3251 kfree(new_state);
3252 return NULL;
3253 }
3254
3255 return &new_state->base;
3256 }
3257
3258 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3259 struct drm_private_state *state)
3260 {
3261 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3262
3263 if (dm_state && dm_state->context)
3264 dc_release_state(dm_state->context);
3265
3266 kfree(dm_state);
3267 }
3268
3269 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3270 .atomic_duplicate_state = dm_atomic_duplicate_state,
3271 .atomic_destroy_state = dm_atomic_destroy_state,
3272 };
3273
3274 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3275 {
3276 struct dm_atomic_state *state;
3277 int r;
3278
3279 adev->mode_info.mode_config_initialized = true;
3280
3281 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3282 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3283
3284 adev_to_drm(adev)->mode_config.max_width = 16384;
3285 adev_to_drm(adev)->mode_config.max_height = 16384;
3286
3287 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3288 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3289 /* indicates support for immediate flip */
3290 adev_to_drm(adev)->mode_config.async_page_flip = true;
3291
3292 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3293
3294 state = kzalloc(sizeof(*state), GFP_KERNEL);
3295 if (!state)
3296 return -ENOMEM;
3297
3298 state->context = dc_create_state(adev->dm.dc);
3299 if (!state->context) {
3300 kfree(state);
3301 return -ENOMEM;
3302 }
3303
3304 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3305
3306 drm_atomic_private_obj_init(adev_to_drm(adev),
3307 &adev->dm.atomic_obj,
3308 &state->base,
3309 &dm_atomic_state_funcs);
3310
3311 r = amdgpu_display_modeset_create_props(adev);
3312 if (r) {
3313 dc_release_state(state->context);
3314 kfree(state);
3315 return r;
3316 }
3317
3318 r = amdgpu_dm_audio_init(adev);
3319 if (r) {
3320 dc_release_state(state->context);
3321 kfree(state);
3322 return r;
3323 }
3324
3325 return 0;
3326 }
3327
3328 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3329 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3330 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3331
3332 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3333 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3334
3335 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3336 {
3337 #if defined(CONFIG_ACPI)
3338 struct amdgpu_dm_backlight_caps caps;
3339
3340 memset(&caps, 0, sizeof(caps));
3341
3342 if (dm->backlight_caps.caps_valid)
3343 return;
3344
3345 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3346 if (caps.caps_valid) {
3347 dm->backlight_caps.caps_valid = true;
3348 if (caps.aux_support)
3349 return;
3350 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3351 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3352 } else {
3353 dm->backlight_caps.min_input_signal =
3354 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3355 dm->backlight_caps.max_input_signal =
3356 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3357 }
3358 #else
3359 if (dm->backlight_caps.aux_support)
3360 return;
3361
3362 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3363 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3364 #endif
3365 }
3366
3367 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3368 unsigned *min, unsigned *max)
3369 {
3370 if (!caps)
3371 return 0;
3372
3373 if (caps->aux_support) {
3374 // Firmware limits are in nits, DC API wants millinits.
3375 *max = 1000 * caps->aux_max_input_signal;
3376 *min = 1000 * caps->aux_min_input_signal;
3377 } else {
3378 // Firmware limits are 8-bit, PWM control is 16-bit.
3379 *max = 0x101 * caps->max_input_signal;
3380 *min = 0x101 * caps->min_input_signal;
3381 }
3382 return 1;
3383 }
3384
3385 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3386 uint32_t brightness)
3387 {
3388 unsigned min, max;
3389
3390 if (!get_brightness_range(caps, &min, &max))
3391 return brightness;
3392
3393 // Rescale 0..255 to min..max
3394 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3395 AMDGPU_MAX_BL_LEVEL);
3396 }
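/* Worked example assuming the default PWM caps defined above
 * (min_input_signal = 12, max_input_signal = 255):
 * min = 0x101 * 12 = 3084, max = 0x101 * 255 = 65535, so a user
 * brightness of 0 maps to 3084 and 255 maps to 65535.
 */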
3397
3398 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3399 uint32_t brightness)
3400 {
3401 unsigned min, max;
3402
3403 if (!get_brightness_range(caps, &min, &max))
3404 return brightness;
3405
3406 if (brightness < min)
3407 return 0;
3408 // Rescale min..max to 0..255
3409 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3410 max - min);
3411 }
3412
3413 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3414 {
3415 struct amdgpu_display_manager *dm = bl_get_data(bd);
3416 struct amdgpu_dm_backlight_caps caps;
3417 struct dc_link *link = NULL;
3418 u32 brightness;
3419 bool rc;
3420
3421 amdgpu_dm_update_backlight_caps(dm);
3422 caps = dm->backlight_caps;
3423
3424 link = (struct dc_link *)dm->backlight_link;
3425
3426 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3427 // Change brightness based on AUX property
3428 if (caps.aux_support)
3429 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3430 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3431 else
3432 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3433
3434 return rc ? 0 : 1;
3435 }
3436
3437 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3438 {
3439 struct amdgpu_display_manager *dm = bl_get_data(bd);
3440 struct amdgpu_dm_backlight_caps caps;
3441
3442 amdgpu_dm_update_backlight_caps(dm);
3443 caps = dm->backlight_caps;
3444
3445 if (caps.aux_support) {
3446 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3447 u32 avg, peak;
3448 bool rc;
3449
3450 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3451 if (!rc)
3452 return bd->props.brightness;
3453 return convert_brightness_to_user(&caps, avg);
3454 } else {
3455 int ret = dc_link_get_backlight_level(dm->backlight_link);
3456
3457 if (ret == DC_ERROR_UNEXPECTED)
3458 return bd->props.brightness;
3459 return convert_brightness_to_user(&caps, ret);
3460 }
3461 }
3462
3463 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3464 .options = BL_CORE_SUSPENDRESUME,
3465 .get_brightness = amdgpu_dm_backlight_get_brightness,
3466 .update_status = amdgpu_dm_backlight_update_status,
3467 };
3468
3469 static void
3470 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3471 {
3472 char bl_name[16];
3473 struct backlight_properties props = { 0 };
3474
3475 amdgpu_dm_update_backlight_caps(dm);
3476
3477 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3478 props.brightness = AMDGPU_MAX_BL_LEVEL;
3479 props.type = BACKLIGHT_RAW;
3480
3481 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3482 adev_to_drm(dm->adev)->primary->index);
3483
3484 dm->backlight_dev = backlight_device_register(bl_name,
3485 adev_to_drm(dm->adev)->dev,
3486 dm,
3487 &amdgpu_dm_backlight_ops,
3488 &props);
3489
3490 if (IS_ERR(dm->backlight_dev))
3491 DRM_ERROR("DM: Backlight registration failed!\n");
3492 else
3493 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3494 }
3495
3496 #endif
3497
3498 static int initialize_plane(struct amdgpu_display_manager *dm,
3499 struct amdgpu_mode_info *mode_info, int plane_id,
3500 enum drm_plane_type plane_type,
3501 const struct dc_plane_cap *plane_cap)
3502 {
3503 struct drm_plane *plane;
3504 unsigned long possible_crtcs;
3505 int ret = 0;
3506
3507 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3508 if (!plane) {
3509 DRM_ERROR("KMS: Failed to allocate plane\n");
3510 return -ENOMEM;
3511 }
3512 plane->type = plane_type;
3513
3514 /*
3515 * HACK: IGT tests expect that the primary plane for a CRTC
3516 * can only have one possible CRTC. Only expose support for
3517 * any CRTC if they're not going to be used as a primary plane
3518 * for a CRTC - like overlay or underlay planes.
3519 */
3520 possible_crtcs = 1 << plane_id;
3521 if (plane_id >= dm->dc->caps.max_streams)
3522 possible_crtcs = 0xff;
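/* Illustrative: plane_id 1 gives possible_crtcs = 0x2 (CRTC index 1
 * only), while overlay planes (plane_id >= max_streams) advertise 0xff,
 * i.e. any CRTC.
 */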
3523
3524 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3525
3526 if (ret) {
3527 DRM_ERROR("KMS: Failed to initialize plane\n");
3528 kfree(plane);
3529 return ret;
3530 }
3531
3532 if (mode_info)
3533 mode_info->planes[plane_id] = plane;
3534
3535 return ret;
3536 }
3537
3538
3539 static void register_backlight_device(struct amdgpu_display_manager *dm,
3540 struct dc_link *link)
3541 {
3542 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3543 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3544
3545 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3546 link->type != dc_connection_none) {
3547 /*
3548 * Even if registration failed, we should continue with
3549 * DM initialization because not having backlight control
3550 * is better than a black screen.
3551 */
3552 amdgpu_dm_register_backlight_device(dm);
3553
3554 if (dm->backlight_dev)
3555 dm->backlight_link = link;
3556 }
3557 #endif
3558 }
3559
3560
3561 /*
3562 * In this architecture, the association
3563 * connector -> encoder -> crtc
3564 * is not really required. The crtc and connector will hold the
3565 * display_index as an abstraction to use with the DAL component
3566 *
3567 * Returns 0 on success
3568 */
3569 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3570 {
3571 struct amdgpu_display_manager *dm = &adev->dm;
3572 int32_t i;
3573 struct amdgpu_dm_connector *aconnector = NULL;
3574 struct amdgpu_encoder *aencoder = NULL;
3575 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3576 uint32_t link_cnt;
3577 int32_t primary_planes;
3578 enum dc_connection_type new_connection_type = dc_connection_none;
3579 const struct dc_plane_cap *plane;
3580
3581 dm->display_indexes_num = dm->dc->caps.max_streams;
3582 /* Update the actually used number of crtcs */
3583 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3584
3585 link_cnt = dm->dc->caps.max_links;
3586 if (amdgpu_dm_mode_config_init(dm->adev)) {
3587 DRM_ERROR("DM: Failed to initialize mode config\n");
3588 return -EINVAL;
3589 }
3590
3591 /* There is one primary plane per CRTC */
3592 primary_planes = dm->dc->caps.max_streams;
3593 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3594
3595 /*
3596 * Initialize primary planes, implicit planes for legacy IOCTLS.
3597 * Order is reversed to match iteration order in atomic check.
3598 */
3599 for (i = (primary_planes - 1); i >= 0; i--) {
3600 plane = &dm->dc->caps.planes[i];
3601
3602 if (initialize_plane(dm, mode_info, i,
3603 DRM_PLANE_TYPE_PRIMARY, plane)) {
3604 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3605 goto fail;
3606 }
3607 }
3608
3609 /*
3610 * Initialize overlay planes, index starting after primary planes.
3611 * These planes have a higher DRM index than the primary planes since
3612 * they should be considered as having a higher z-order.
3613 * Order is reversed to match iteration order in atomic check.
3614 *
3615 * Only support DCN for now, and only expose one so we don't encourage
3616 * userspace to use up all the pipes.
3617 */
3618 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3619 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3620
3621 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3622 continue;
3623
3624 if (!plane->blends_with_above || !plane->blends_with_below)
3625 continue;
3626
3627 if (!plane->pixel_format_support.argb8888)
3628 continue;
3629
3630 if (initialize_plane(dm, NULL, primary_planes + i,
3631 DRM_PLANE_TYPE_OVERLAY, plane)) {
3632 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3633 goto fail;
3634 }
3635
3636 /* Only create one overlay plane. */
3637 break;
3638 }
3639
3640 for (i = 0; i < dm->dc->caps.max_streams; i++)
3641 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3642 DRM_ERROR("KMS: Failed to initialize crtc\n");
3643 goto fail;
3644 }
3645
3646 /* loops over all connectors on the board */
3647 for (i = 0; i < link_cnt; i++) {
3648 struct dc_link *link = NULL;
3649
3650 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3651 DRM_ERROR(
3652 "KMS: Cannot support more than %d display indexes\n",
3653 AMDGPU_DM_MAX_DISPLAY_INDEX);
3654 continue;
3655 }
3656
3657 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3658 if (!aconnector)
3659 goto fail;
3660
3661 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3662 if (!aencoder)
3663 goto fail;
3664
3665 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3666 DRM_ERROR("KMS: Failed to initialize encoder\n");
3667 goto fail;
3668 }
3669
3670 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3671 DRM_ERROR("KMS: Failed to initialize connector\n");
3672 goto fail;
3673 }
3674
3675 link = dc_get_link_at_index(dm->dc, i);
3676
3677 if (!dc_link_detect_sink(link, &new_connection_type))
3678 DRM_ERROR("KMS: Failed to detect connector\n");
3679
3680 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3681 emulated_link_detect(link);
3682 amdgpu_dm_update_connector_after_detect(aconnector);
3683
3684 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3685 amdgpu_dm_update_connector_after_detect(aconnector);
3686 register_backlight_device(dm, link);
3687 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3688 amdgpu_dm_set_psr_caps(link);
3689 }
3690
3691
3692 }
3693
3694 /* Software is initialized. Now we can register interrupt handlers. */
3695 switch (adev->asic_type) {
3696 #if defined(CONFIG_DRM_AMD_DC_SI)
3697 case CHIP_TAHITI:
3698 case CHIP_PITCAIRN:
3699 case CHIP_VERDE:
3700 case CHIP_OLAND:
3701 if (dce60_register_irq_handlers(dm->adev)) {
3702 DRM_ERROR("DM: Failed to initialize IRQ\n");
3703 goto fail;
3704 }
3705 break;
3706 #endif
3707 case CHIP_BONAIRE:
3708 case CHIP_HAWAII:
3709 case CHIP_KAVERI:
3710 case CHIP_KABINI:
3711 case CHIP_MULLINS:
3712 case CHIP_TONGA:
3713 case CHIP_FIJI:
3714 case CHIP_CARRIZO:
3715 case CHIP_STONEY:
3716 case CHIP_POLARIS11:
3717 case CHIP_POLARIS10:
3718 case CHIP_POLARIS12:
3719 case CHIP_VEGAM:
3720 case CHIP_VEGA10:
3721 case CHIP_VEGA12:
3722 case CHIP_VEGA20:
3723 if (dce110_register_irq_handlers(dm->adev)) {
3724 DRM_ERROR("DM: Failed to initialize IRQ\n");
3725 goto fail;
3726 }
3727 break;
3728 #if defined(CONFIG_DRM_AMD_DC_DCN)
3729 case CHIP_RAVEN:
3730 case CHIP_NAVI12:
3731 case CHIP_NAVI10:
3732 case CHIP_NAVI14:
3733 case CHIP_RENOIR:
3734 case CHIP_SIENNA_CICHLID:
3735 case CHIP_NAVY_FLOUNDER:
3736 case CHIP_DIMGREY_CAVEFISH:
3737 case CHIP_VANGOGH:
3738 if (dcn10_register_irq_handlers(dm->adev)) {
3739 DRM_ERROR("DM: Failed to initialize IRQ\n");
3740 goto fail;
3741 }
3742 break;
3743 #endif
3744 default:
3745 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3746 goto fail;
3747 }
3748
3749 return 0;
3750 fail:
3751 kfree(aencoder);
3752 kfree(aconnector);
3753
3754 return -EINVAL;
3755 }
3756
3757 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3758 {
3759 drm_mode_config_cleanup(dm->ddev);
3760 drm_atomic_private_obj_fini(&dm->atomic_obj);
3761 return;
3762 }
3763
3764 /******************************************************************************
3765 * amdgpu_display_funcs functions
3766 *****************************************************************************/
3767
3768 /*
3769 * dm_bandwidth_update - program display watermarks
3770 *
3771 * @adev: amdgpu_device pointer
3772 *
3773 * Calculate and program the display watermarks and line buffer allocation.
3774 */
3775 static void dm_bandwidth_update(struct amdgpu_device *adev)
3776 {
3777 /* TODO: implement later */
3778 }
3779
3780 static const struct amdgpu_display_funcs dm_display_funcs = {
3781 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3782 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3783 .backlight_set_level = NULL, /* never called for DC */
3784 .backlight_get_level = NULL, /* never called for DC */
3785 .hpd_sense = NULL,/* called unconditionally */
3786 .hpd_set_polarity = NULL, /* called unconditionally */
3787 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3788 .page_flip_get_scanoutpos =
3789 dm_crtc_get_scanoutpos,/* called unconditionally */
3790 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3791 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3792 };
3793
3794 #if defined(CONFIG_DEBUG_KERNEL_DC)
3795
3796 static ssize_t s3_debug_store(struct device *device,
3797 struct device_attribute *attr,
3798 const char *buf,
3799 size_t count)
3800 {
3801 int ret;
3802 int s3_state;
3803 struct drm_device *drm_dev = dev_get_drvdata(device);
3804 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3805
3806 ret = kstrtoint(buf, 0, &s3_state);
3807
3808 if (ret == 0) {
3809 if (s3_state) {
3810 dm_resume(adev);
3811 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3812 } else
3813 dm_suspend(adev);
3814 }
3815
3816 return ret == 0 ? count : 0;
3817 }
3818
3819 DEVICE_ATTR_WO(s3_debug);
3820
3821 #endif
3822
3823 static int dm_early_init(void *handle)
3824 {
3825 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3826
3827 switch (adev->asic_type) {
3828 #if defined(CONFIG_DRM_AMD_DC_SI)
3829 case CHIP_TAHITI:
3830 case CHIP_PITCAIRN:
3831 case CHIP_VERDE:
3832 adev->mode_info.num_crtc = 6;
3833 adev->mode_info.num_hpd = 6;
3834 adev->mode_info.num_dig = 6;
3835 break;
3836 case CHIP_OLAND:
3837 adev->mode_info.num_crtc = 2;
3838 adev->mode_info.num_hpd = 2;
3839 adev->mode_info.num_dig = 2;
3840 break;
3841 #endif
3842 case CHIP_BONAIRE:
3843 case CHIP_HAWAII:
3844 adev->mode_info.num_crtc = 6;
3845 adev->mode_info.num_hpd = 6;
3846 adev->mode_info.num_dig = 6;
3847 break;
3848 case CHIP_KAVERI:
3849 adev->mode_info.num_crtc = 4;
3850 adev->mode_info.num_hpd = 6;
3851 adev->mode_info.num_dig = 7;
3852 break;
3853 case CHIP_KABINI:
3854 case CHIP_MULLINS:
3855 adev->mode_info.num_crtc = 2;
3856 adev->mode_info.num_hpd = 6;
3857 adev->mode_info.num_dig = 6;
3858 break;
3859 case CHIP_FIJI:
3860 case CHIP_TONGA:
3861 adev->mode_info.num_crtc = 6;
3862 adev->mode_info.num_hpd = 6;
3863 adev->mode_info.num_dig = 7;
3864 break;
3865 case CHIP_CARRIZO:
3866 adev->mode_info.num_crtc = 3;
3867 adev->mode_info.num_hpd = 6;
3868 adev->mode_info.num_dig = 9;
3869 break;
3870 case CHIP_STONEY:
3871 adev->mode_info.num_crtc = 2;
3872 adev->mode_info.num_hpd = 6;
3873 adev->mode_info.num_dig = 9;
3874 break;
3875 case CHIP_POLARIS11:
3876 case CHIP_POLARIS12:
3877 adev->mode_info.num_crtc = 5;
3878 adev->mode_info.num_hpd = 5;
3879 adev->mode_info.num_dig = 5;
3880 break;
3881 case CHIP_POLARIS10:
3882 case CHIP_VEGAM:
3883 adev->mode_info.num_crtc = 6;
3884 adev->mode_info.num_hpd = 6;
3885 adev->mode_info.num_dig = 6;
3886 break;
3887 case CHIP_VEGA10:
3888 case CHIP_VEGA12:
3889 case CHIP_VEGA20:
3890 adev->mode_info.num_crtc = 6;
3891 adev->mode_info.num_hpd = 6;
3892 adev->mode_info.num_dig = 6;
3893 break;
3894 #if defined(CONFIG_DRM_AMD_DC_DCN)
3895 case CHIP_RAVEN:
3896 case CHIP_RENOIR:
3897 case CHIP_VANGOGH:
3898 adev->mode_info.num_crtc = 4;
3899 adev->mode_info.num_hpd = 4;
3900 adev->mode_info.num_dig = 4;
3901 break;
3902 case CHIP_NAVI10:
3903 case CHIP_NAVI12:
3904 case CHIP_SIENNA_CICHLID:
3905 case CHIP_NAVY_FLOUNDER:
3906 adev->mode_info.num_crtc = 6;
3907 adev->mode_info.num_hpd = 6;
3908 adev->mode_info.num_dig = 6;
3909 break;
3910 case CHIP_NAVI14:
3911 case CHIP_DIMGREY_CAVEFISH:
3912 adev->mode_info.num_crtc = 5;
3913 adev->mode_info.num_hpd = 5;
3914 adev->mode_info.num_dig = 5;
3915 break;
3916 #endif
3917 default:
3918 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3919 return -EINVAL;
3920 }
3921
3922 amdgpu_dm_set_irq_funcs(adev);
3923
3924 if (adev->mode_info.funcs == NULL)
3925 adev->mode_info.funcs = &dm_display_funcs;
3926
3927 /*
3928 * Note: Do NOT change adev->audio_endpt_rreg and
3929 * adev->audio_endpt_wreg because they are initialised in
3930 * amdgpu_device_init()
3931 */
3932 #if defined(CONFIG_DEBUG_KERNEL_DC)
3933 device_create_file(
3934 adev_to_drm(adev)->dev,
3935 &dev_attr_s3_debug);
3936 #endif
3937
3938 return 0;
3939 }
3940
3941 static bool modeset_required(struct drm_crtc_state *crtc_state,
3942 struct dc_stream_state *new_stream,
3943 struct dc_stream_state *old_stream)
3944 {
3945 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3946 }
3947
3948 static bool modereset_required(struct drm_crtc_state *crtc_state)
3949 {
3950 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3951 }
3952
3953 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3954 {
3955 drm_encoder_cleanup(encoder);
3956 kfree(encoder);
3957 }
3958
3959 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3960 .destroy = amdgpu_dm_encoder_destroy,
3961 };
3962
3963
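/*
 * get_min_max_dc_plane_scaling - look up per-format scaling limits
 *
 * Reads the DC plane capabilities for the framebuffer's pixel format class
 * (NV12-style video, FP16, or everything else) and returns the limits in
 * units of 1/1000 of the source size: e.g. a min_downscale of 250 allows
 * shrinking to 1/4 size. A raw capability value of 1 is normalised to
 * 1000, meaning no scaling is allowed.
 */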
3964 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3965 struct drm_framebuffer *fb,
3966 int *min_downscale, int *max_upscale)
3967 {
3968 struct amdgpu_device *adev = drm_to_adev(dev);
3969 struct dc *dc = adev->dm.dc;
3970 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3971 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3972
3973 switch (fb->format->format) {
3974 case DRM_FORMAT_P010:
3975 case DRM_FORMAT_NV12:
3976 case DRM_FORMAT_NV21:
3977 *max_upscale = plane_cap->max_upscale_factor.nv12;
3978 *min_downscale = plane_cap->max_downscale_factor.nv12;
3979 break;
3980
3981 case DRM_FORMAT_XRGB16161616F:
3982 case DRM_FORMAT_ARGB16161616F:
3983 case DRM_FORMAT_XBGR16161616F:
3984 case DRM_FORMAT_ABGR16161616F:
3985 *max_upscale = plane_cap->max_upscale_factor.fp16;
3986 *min_downscale = plane_cap->max_downscale_factor.fp16;
3987 break;
3988
3989 default:
3990 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3991 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3992 break;
3993 }
3994
3995 /*
3996 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
3997 * scaling factor of 1.0 == 1000 units.
3998 */
3999 if (*max_upscale == 1)
4000 *max_upscale = 1000;
4001
4002 if (*min_downscale == 1)
4003 *min_downscale = 1000;
4004 }
4005
4006
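/*
 * fill_dc_scaling_info - convert DRM plane coordinates into DC rects
 *
 * Translates the 16.16 fixed point source rectangle and the integer CRTC
 * destination rectangle from @state into @scaling_info, then validates the
 * implied scaling factors against the per-format limits reported by
 * get_min_max_dc_plane_scaling(). Returns 0 on success or -EINVAL if a
 * rectangle is degenerate or the scaling is out of range.
 */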
4007 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4008 struct dc_scaling_info *scaling_info)
4009 {
4010 int scale_w, scale_h, min_downscale, max_upscale;
4011
4012 memset(scaling_info, 0, sizeof(*scaling_info));
4013
4014 /* Source is fixed 16.16 but we ignore mantissa for now... */
4015 scaling_info->src_rect.x = state->src_x >> 16;
4016 scaling_info->src_rect.y = state->src_y >> 16;
4017
4018 /*
4019 * For reasons we don't (yet) fully understand a non-zero
4020 * src_y coordinate into an NV12 buffer can cause a
4021 * system hang. To avoid hangs (and maybe be overly cautious)
4022 * let's reject both non-zero src_x and src_y.
4023 *
4024 * We currently know of only one use-case to reproduce a
4025 * scenario with non-zero src_x and src_y for NV12, which
4026 * is to gesture the YouTube Android app into full screen
4027 * on ChromeOS.
4028 */
4029 if (state->fb &&
4030 state->fb->format->format == DRM_FORMAT_NV12 &&
4031 (scaling_info->src_rect.x != 0 ||
4032 scaling_info->src_rect.y != 0))
4033 return -EINVAL;
4034
4035 scaling_info->src_rect.width = state->src_w >> 16;
4036 if (scaling_info->src_rect.width == 0)
4037 return -EINVAL;
4038
4039 scaling_info->src_rect.height = state->src_h >> 16;
4040 if (scaling_info->src_rect.height == 0)
4041 return -EINVAL;
4042
4043 scaling_info->dst_rect.x = state->crtc_x;
4044 scaling_info->dst_rect.y = state->crtc_y;
4045
4046 if (state->crtc_w == 0)
4047 return -EINVAL;
4048
4049 scaling_info->dst_rect.width = state->crtc_w;
4050
4051 if (state->crtc_h == 0)
4052 return -EINVAL;
4053
4054 scaling_info->dst_rect.height = state->crtc_h;
4055
4056 /* DRM doesn't specify clipping on destination output. */
4057 scaling_info->clip_rect = scaling_info->dst_rect;
4058
4059 /* Validate scaling per-format with DC plane caps */
4060 if (state->plane && state->plane->dev && state->fb) {
4061 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4062 &min_downscale, &max_upscale);
4063 } else {
4064 min_downscale = 250;
4065 max_upscale = 16000;
4066 }
4067
4068 scale_w = scaling_info->dst_rect.width * 1000 /
4069 scaling_info->src_rect.width;
4070
4071 if (scale_w < min_downscale || scale_w > max_upscale)
4072 return -EINVAL;
4073
4074 scale_h = scaling_info->dst_rect.height * 1000 /
4075 scaling_info->src_rect.height;
4076
4077 if (scale_h < min_downscale || scale_h > max_upscale)
4078 return -EINVAL;
4079
4080 /*
4081 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4082 * assume reasonable defaults based on the format.
4083 */
4084
4085 return 0;
4086 }
4087
4088 static void
4089 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4090 uint64_t tiling_flags)
4091 {
4092 /* Fill GFX8 params */
4093 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4094 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4095
4096 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4097 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4098 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4099 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4100 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4101
4102 /* XXX fix me for VI */
4103 tiling_info->gfx8.num_banks = num_banks;
4104 tiling_info->gfx8.array_mode =
4105 DC_ARRAY_2D_TILED_THIN1;
4106 tiling_info->gfx8.tile_split = tile_split;
4107 tiling_info->gfx8.bank_width = bankw;
4108 tiling_info->gfx8.bank_height = bankh;
4109 tiling_info->gfx8.tile_aspect = mtaspect;
4110 tiling_info->gfx8.tile_mode =
4111 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4112 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4113 == DC_ARRAY_1D_TILED_THIN1) {
4114 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4115 }
4116
4117 tiling_info->gfx8.pipe_config =
4118 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4119 }
4120
4121 static void
4122 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4123 union dc_tiling_info *tiling_info)
4124 {
4125 tiling_info->gfx9.num_pipes =
4126 adev->gfx.config.gb_addr_config_fields.num_pipes;
4127 tiling_info->gfx9.num_banks =
4128 adev->gfx.config.gb_addr_config_fields.num_banks;
4129 tiling_info->gfx9.pipe_interleave =
4130 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4131 tiling_info->gfx9.num_shader_engines =
4132 adev->gfx.config.gb_addr_config_fields.num_se;
4133 tiling_info->gfx9.max_compressed_frags =
4134 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4135 tiling_info->gfx9.num_rb_per_se =
4136 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4137 tiling_info->gfx9.shaderEnable = 1;
4138 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4139 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4140 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4141 adev->asic_type == CHIP_VANGOGH)
4142 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4143 }
4144
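/*
 * validate_dcc - check that the requested DCC configuration is supported
 *
 * Queries DC (via get_dcc_compression_cap) with the surface format, size
 * and swizzle mode. DCC is never allowed on video (YUV) surfaces, and the
 * plane is rejected if the surface is not DCC-capable or the independent
 * 64B block setting does not match what the hardware requires. Returns 0
 * when DCC is disabled or acceptable, -EINVAL otherwise.
 */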
4145 static int
4146 validate_dcc(struct amdgpu_device *adev,
4147 const enum surface_pixel_format format,
4148 const enum dc_rotation_angle rotation,
4149 const union dc_tiling_info *tiling_info,
4150 const struct dc_plane_dcc_param *dcc,
4151 const struct dc_plane_address *address,
4152 const struct plane_size *plane_size)
4153 {
4154 struct dc *dc = adev->dm.dc;
4155 struct dc_dcc_surface_param input;
4156 struct dc_surface_dcc_cap output;
4157
4158 memset(&input, 0, sizeof(input));
4159 memset(&output, 0, sizeof(output));
4160
4161 if (!dcc->enable)
4162 return 0;
4163
4164 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4165 !dc->cap_funcs.get_dcc_compression_cap)
4166 return -EINVAL;
4167
4168 input.format = format;
4169 input.surface_size.width = plane_size->surface_size.width;
4170 input.surface_size.height = plane_size->surface_size.height;
4171 input.swizzle_mode = tiling_info->gfx9.swizzle;
4172
4173 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4174 input.scan = SCAN_DIRECTION_HORIZONTAL;
4175 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4176 input.scan = SCAN_DIRECTION_VERTICAL;
4177
4178 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4179 return -EINVAL;
4180
4181 if (!output.capable)
4182 return -EINVAL;
4183
4184 if (dcc->independent_64b_blks == 0 &&
4185 output.grph.rgb.independent_64b_blks != 0)
4186 return -EINVAL;
4187
4188 return 0;
4189 }
4190
4191 static bool
4192 modifier_has_dcc(uint64_t modifier)
4193 {
4194 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4195 }
4196
4197 static unsigned
4198 modifier_gfx9_swizzle_mode(uint64_t modifier)
4199 {
4200 if (modifier == DRM_FORMAT_MOD_LINEAR)
4201 return 0;
4202
4203 return AMD_FMT_MOD_GET(TILE, modifier);
4204 }
4205
4206 static const struct drm_format_info *
4207 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4208 {
4209 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4210 }
4211
4212 static void
4213 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4214 union dc_tiling_info *tiling_info,
4215 uint64_t modifier)
4216 {
4217 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4218 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4219 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4220 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4221
4222 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4223
4224 if (!IS_AMD_FMT_MOD(modifier))
4225 return;
4226
4227 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4228 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4229
4230 if (adev->family >= AMDGPU_FAMILY_NV) {
4231 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4232 } else {
4233 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4234
4235 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4236 }
4237 }
4238
4239 enum dm_micro_swizzle {
4240 MICRO_SWIZZLE_Z = 0,
4241 MICRO_SWIZZLE_S = 1,
4242 MICRO_SWIZZLE_D = 2,
4243 MICRO_SWIZZLE_R = 3
4244 };
4245
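/*
 * dm_plane_format_mod_supported - filter format/modifier combinations
 *
 * Called by DRM core when userspace queries plane capabilities. LINEAR and
 * INVALID are always accepted; any other modifier must be on the plane's
 * advertised list and additionally satisfy bpp restrictions for the _D
 * micro-swizzle and for DCC (DCC needs 32bpp and a single plane).
 */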
4246 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4247 uint32_t format,
4248 uint64_t modifier)
4249 {
4250 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4251 const struct drm_format_info *info = drm_format_info(format);
4252 int i;
4253
4254 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4255
4256 if (!info)
4257 return false;
4258
4259 /*
4260 * We always have to allow these modifiers:
4261 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4262 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4263 */
4264 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4265 modifier == DRM_FORMAT_MOD_INVALID) {
4266 return true;
4267 }
4268
4269 /* Check that the modifier is on the list of the plane's supported modifiers. */
4270 for (i = 0; i < plane->modifier_count; i++) {
4271 if (modifier == plane->modifiers[i])
4272 break;
4273 }
4274 if (i == plane->modifier_count)
4275 return false;
4276
4277 /*
4278 * For D swizzle the canonical modifier depends on the bpp, so check
4279 * it here.
4280 */
4281 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4282 adev->family >= AMDGPU_FAMILY_NV) {
4283 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4284 return false;
4285 }
4286
4287 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4288 info->cpp[0] < 8)
4289 return false;
4290
4291 if (modifier_has_dcc(modifier)) {
4292 /* Per radeonsi comments 16/64 bpp are more complicated. */
4293 if (info->cpp[0] != 4)
4294 return false;
4295 /* We support multi-planar formats, but not when combined with
4296 * additional DCC metadata planes. */
4297 if (info->num_planes > 1)
4298 return false;
4299 }
4300
4301 return true;
4302 }
4303
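/*
 * add_modifier - append one modifier to a dynamically grown array
 *
 * Doubles the backing allocation when it runs out of space. On allocation
 * failure the array is freed and *mods is set to NULL, which callers use
 * as the out-of-memory signal.
 */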
4304 static void
4305 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4306 {
4307 if (!*mods)
4308 return;
4309
4310 if (*cap - *size < 1) {
4311 uint64_t new_cap = *cap * 2;
4312 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4313
4314 if (!new_mods) {
4315 kfree(*mods);
4316 *mods = NULL;
4317 return;
4318 }
4319
4320 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4321 kfree(*mods);
4322 *mods = new_mods;
4323 *cap = new_cap;
4324 }
4325
4326 (*mods)[*size] = mod;
4327 *size += 1;
4328 }
4329
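/*
 * add_gfx9_modifiers - advertise the GFX9 (Vega/Raven) swizzle modes
 *
 * Builds the AMD format modifier list for GFX9 ASICs. DCC-capable variants
 * are only added for the Raven family; constant-encode DCC additionally
 * requires Raven2 or newer (external_rev_id >= 0x81). The plain 64K _D
 * modifiers are always added and filtered to 64bpp formats later in
 * dm_plane_format_mod_supported().
 */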
4330 static void
4331 add_gfx9_modifiers(const struct amdgpu_device *adev,
4332 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4333 {
4334 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4335 int pipe_xor_bits = min(8, pipes +
4336 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4337 int bank_xor_bits = min(8 - pipe_xor_bits,
4338 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4339 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4340 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4341
4342
4343 if (adev->family == AMDGPU_FAMILY_RV) {
4344 /* Raven2 and later */
4345 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4346
4347 /*
4348 * No _D DCC swizzles yet because we only allow 32bpp, which
4349 * doesn't support _D on DCN
4350 */
4351
4352 if (has_constant_encode) {
4353 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4354 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4355 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4356 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4357 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4358 AMD_FMT_MOD_SET(DCC, 1) |
4359 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4360 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4361 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4362 }
4363
4364 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4365 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4366 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4367 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4368 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4369 AMD_FMT_MOD_SET(DCC, 1) |
4370 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4371 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4372 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4373
4374 if (has_constant_encode) {
4375 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4376 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4377 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4378 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4379 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4380 AMD_FMT_MOD_SET(DCC, 1) |
4381 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4382 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4383 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4384
4385 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4386 AMD_FMT_MOD_SET(RB, rb) |
4387 AMD_FMT_MOD_SET(PIPE, pipes));
4388 }
4389
4390 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4391 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4392 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4393 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4394 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4395 AMD_FMT_MOD_SET(DCC, 1) |
4396 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4397 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4398 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4399 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4400 AMD_FMT_MOD_SET(RB, rb) |
4401 AMD_FMT_MOD_SET(PIPE, pipes));
4402 }
4403
4404 /*
4405 * Only supported for 64bpp on Raven, will be filtered on format in
4406 * dm_plane_format_mod_supported.
4407 */
4408 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4409 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4410 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4411 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4412 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4413
4414 if (adev->family == AMDGPU_FAMILY_RV) {
4415 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4416 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4417 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4418 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4419 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4420 }
4421
4422 /*
4423 * Only supported for 64bpp on Raven, will be filtered on format in
4424 * dm_plane_format_mod_supported.
4425 */
4426 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4427 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4428 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4429
4430 if (adev->family == AMDGPU_FAMILY_RV) {
4431 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4432 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4433 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4434 }
4435 }
4436
4437 static void
4438 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4439 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4440 {
4441 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4442
4443 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4444 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4445 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4446 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4447 AMD_FMT_MOD_SET(DCC, 1) |
4448 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4449 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4450 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4451
4452 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4453 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4454 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4455 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4456 AMD_FMT_MOD_SET(DCC, 1) |
4457 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4458 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4459 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4460 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4461
4462 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4463 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4464 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4465 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4466
4467 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4468 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4469 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4470 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4471
4472
4473 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4474 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4475 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4476 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4477
4478 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4479 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4480 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4481 }
4482
4483 static void
4484 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4485 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4486 {
4487 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4488 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4489
4490 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4491 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4492 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4493 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4494 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4495 AMD_FMT_MOD_SET(DCC, 1) |
4496 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4497 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4498 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4499 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4500
4501 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4502 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4503 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4504 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4505 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4506 AMD_FMT_MOD_SET(DCC, 1) |
4507 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4508 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4509 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4510 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4511 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4512
4513 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4514 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4515 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4516 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4517 AMD_FMT_MOD_SET(PACKERS, pkrs));
4518
4519 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4520 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4521 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4522 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4523 AMD_FMT_MOD_SET(PACKERS, pkrs));
4524
4525 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4526 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4527 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4528 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4529
4530 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4531 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4532 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4533 }
4534
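/*
 * get_plane_modifiers - build the modifier list exposed for a plane
 *
 * Pre-GFX9 ASICs get no explicit modifiers. Cursor planes only advertise
 * LINEAR. Everything else gets the family-specific tiled modifiers plus
 * LINEAR, terminated by DRM_FORMAT_MOD_INVALID. Returns 0 on success or
 * -ENOMEM if the list allocation failed.
 */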
4535 static int
4536 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4537 {
4538 uint64_t size = 0, capacity = 128;
4539 *mods = NULL;
4540
4541 /* We have not hooked up any pre-GFX9 modifiers. */
4542 if (adev->family < AMDGPU_FAMILY_AI)
4543 return 0;
4544
4545 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4546
4547 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4548 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4549 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4550 return *mods ? 0 : -ENOMEM;
4551 }
4552
4553 switch (adev->family) {
4554 case AMDGPU_FAMILY_AI:
4555 case AMDGPU_FAMILY_RV:
4556 add_gfx9_modifiers(adev, mods, &size, &capacity);
4557 break;
4558 case AMDGPU_FAMILY_NV:
4559 case AMDGPU_FAMILY_VGH:
4560 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4561 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4562 else
4563 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4564 break;
4565 }
4566
4567 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4568
4569 /* INVALID marks the end of the list. */
4570 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4571
4572 if (!*mods)
4573 return -ENOMEM;
4574
4575 return 0;
4576 }
4577
4578 static int
4579 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4580 const struct amdgpu_framebuffer *afb,
4581 const enum surface_pixel_format format,
4582 const enum dc_rotation_angle rotation,
4583 const struct plane_size *plane_size,
4584 union dc_tiling_info *tiling_info,
4585 struct dc_plane_dcc_param *dcc,
4586 struct dc_plane_address *address,
4587 const bool force_disable_dcc)
4588 {
4589 const uint64_t modifier = afb->base.modifier;
4590 int ret;
4591
4592 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4593 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4594
4595 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4596 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4597
4598 dcc->enable = 1;
4599 dcc->meta_pitch = afb->base.pitches[1];
4600 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4601
4602 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4603 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4604 }
4605
4606 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4607 if (ret)
4608 return ret;
4609
4610 return 0;
4611 }
4612
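/*
 * fill_plane_buffer_attributes - derive DC surface layout from a framebuffer
 *
 * Fills plane size, pitch, tiling info, DCC parameters and the GPU
 * addresses for either a single RGB surface or a luma/chroma pair
 * (NV12/NV21/P010). GFX9+ layouts are derived from the framebuffer
 * modifier, older ASICs from the legacy tiling flags.
 */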
4613 static int
4614 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4615 const struct amdgpu_framebuffer *afb,
4616 const enum surface_pixel_format format,
4617 const enum dc_rotation_angle rotation,
4618 const uint64_t tiling_flags,
4619 union dc_tiling_info *tiling_info,
4620 struct plane_size *plane_size,
4621 struct dc_plane_dcc_param *dcc,
4622 struct dc_plane_address *address,
4623 bool tmz_surface,
4624 bool force_disable_dcc)
4625 {
4626 const struct drm_framebuffer *fb = &afb->base;
4627 int ret;
4628
4629 memset(tiling_info, 0, sizeof(*tiling_info));
4630 memset(plane_size, 0, sizeof(*plane_size));
4631 memset(dcc, 0, sizeof(*dcc));
4632 memset(address, 0, sizeof(*address));
4633
4634 address->tmz_surface = tmz_surface;
4635
4636 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4637 uint64_t addr = afb->address + fb->offsets[0];
4638
4639 plane_size->surface_size.x = 0;
4640 plane_size->surface_size.y = 0;
4641 plane_size->surface_size.width = fb->width;
4642 plane_size->surface_size.height = fb->height;
4643 plane_size->surface_pitch =
4644 fb->pitches[0] / fb->format->cpp[0];
4645
4646 address->type = PLN_ADDR_TYPE_GRAPHICS;
4647 address->grph.addr.low_part = lower_32_bits(addr);
4648 address->grph.addr.high_part = upper_32_bits(addr);
4649 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4650 uint64_t luma_addr = afb->address + fb->offsets[0];
4651 uint64_t chroma_addr = afb->address + fb->offsets[1];
4652
4653 plane_size->surface_size.x = 0;
4654 plane_size->surface_size.y = 0;
4655 plane_size->surface_size.width = fb->width;
4656 plane_size->surface_size.height = fb->height;
4657 plane_size->surface_pitch =
4658 fb->pitches[0] / fb->format->cpp[0];
4659
4660 plane_size->chroma_size.x = 0;
4661 plane_size->chroma_size.y = 0;
4662 /* TODO: set these based on surface format */
4663 plane_size->chroma_size.width = fb->width / 2;
4664 plane_size->chroma_size.height = fb->height / 2;
4665
4666 plane_size->chroma_pitch =
4667 fb->pitches[1] / fb->format->cpp[1];
4668
4669 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4670 address->video_progressive.luma_addr.low_part =
4671 lower_32_bits(luma_addr);
4672 address->video_progressive.luma_addr.high_part =
4673 upper_32_bits(luma_addr);
4674 address->video_progressive.chroma_addr.low_part =
4675 lower_32_bits(chroma_addr);
4676 address->video_progressive.chroma_addr.high_part =
4677 upper_32_bits(chroma_addr);
4678 }
4679
4680 if (adev->family >= AMDGPU_FAMILY_AI) {
4681 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4682 rotation, plane_size,
4683 tiling_info, dcc,
4684 address,
4685 force_disable_dcc);
4686 if (ret)
4687 return ret;
4688 } else {
4689 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4690 }
4691
4692 return 0;
4693 }
4694
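/*
 * fill_blending_from_plane_state - derive alpha blending settings
 *
 * Only overlay planes support blending here. Per-pixel alpha is enabled
 * for ARGB/RGBA/ABGR 8888 framebuffers with the PREMULTI blend mode, and
 * a global (plane-wide) alpha is programmed whenever the DRM plane alpha
 * property is below its 0xffff maximum.
 */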
4695 static void
4696 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4697 bool *per_pixel_alpha, bool *global_alpha,
4698 int *global_alpha_value)
4699 {
4700 *per_pixel_alpha = false;
4701 *global_alpha = false;
4702 *global_alpha_value = 0xff;
4703
4704 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4705 return;
4706
4707 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4708 static const uint32_t alpha_formats[] = {
4709 DRM_FORMAT_ARGB8888,
4710 DRM_FORMAT_RGBA8888,
4711 DRM_FORMAT_ABGR8888,
4712 };
4713 uint32_t format = plane_state->fb->format->format;
4714 unsigned int i;
4715
4716 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4717 if (format == alpha_formats[i]) {
4718 *per_pixel_alpha = true;
4719 break;
4720 }
4721 }
4722 }
4723
4724 if (plane_state->alpha < 0xffff) {
4725 *global_alpha = true;
4726 *global_alpha_value = plane_state->alpha >> 8;
4727 }
4728 }
4729
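/*
 * fill_plane_color_attributes - map DRM color properties to a DC color space
 *
 * RGB formats always use sRGB. For YCbCr formats the color space follows
 * the plane's color_encoding/color_range properties; BT.2020 is only
 * supported in full range.
 */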
4730 static int
4731 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4732 const enum surface_pixel_format format,
4733 enum dc_color_space *color_space)
4734 {
4735 bool full_range;
4736
4737 *color_space = COLOR_SPACE_SRGB;
4738
4739 /* DRM color properties only affect non-RGB formats. */
4740 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4741 return 0;
4742
4743 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4744
4745 switch (plane_state->color_encoding) {
4746 case DRM_COLOR_YCBCR_BT601:
4747 if (full_range)
4748 *color_space = COLOR_SPACE_YCBCR601;
4749 else
4750 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4751 break;
4752
4753 case DRM_COLOR_YCBCR_BT709:
4754 if (full_range)
4755 *color_space = COLOR_SPACE_YCBCR709;
4756 else
4757 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4758 break;
4759
4760 case DRM_COLOR_YCBCR_BT2020:
4761 if (full_range)
4762 *color_space = COLOR_SPACE_2020_YCBCR;
4763 else
4764 return -EINVAL;
4765 break;
4766
4767 default:
4768 return -EINVAL;
4769 }
4770
4771 return 0;
4772 }
4773
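/*
 * fill_dc_plane_info_and_addr - build a complete dc_plane_info for a plane
 *
 * Maps the DRM fourcc to a DC surface pixel format, converts the rotation,
 * and then delegates to fill_plane_color_attributes(),
 * fill_plane_buffer_attributes() and fill_blending_from_plane_state().
 * Unsupported framebuffer formats are rejected with -EINVAL.
 */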
4774 static int
4775 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4776 const struct drm_plane_state *plane_state,
4777 const uint64_t tiling_flags,
4778 struct dc_plane_info *plane_info,
4779 struct dc_plane_address *address,
4780 bool tmz_surface,
4781 bool force_disable_dcc)
4782 {
4783 const struct drm_framebuffer *fb = plane_state->fb;
4784 const struct amdgpu_framebuffer *afb =
4785 to_amdgpu_framebuffer(plane_state->fb);
4786 int ret;
4787
4788 memset(plane_info, 0, sizeof(*plane_info));
4789
4790 switch (fb->format->format) {
4791 case DRM_FORMAT_C8:
4792 plane_info->format =
4793 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4794 break;
4795 case DRM_FORMAT_RGB565:
4796 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4797 break;
4798 case DRM_FORMAT_XRGB8888:
4799 case DRM_FORMAT_ARGB8888:
4800 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4801 break;
4802 case DRM_FORMAT_XRGB2101010:
4803 case DRM_FORMAT_ARGB2101010:
4804 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4805 break;
4806 case DRM_FORMAT_XBGR2101010:
4807 case DRM_FORMAT_ABGR2101010:
4808 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4809 break;
4810 case DRM_FORMAT_XBGR8888:
4811 case DRM_FORMAT_ABGR8888:
4812 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4813 break;
4814 case DRM_FORMAT_NV21:
4815 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4816 break;
4817 case DRM_FORMAT_NV12:
4818 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4819 break;
4820 case DRM_FORMAT_P010:
4821 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4822 break;
4823 case DRM_FORMAT_XRGB16161616F:
4824 case DRM_FORMAT_ARGB16161616F:
4825 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4826 break;
4827 case DRM_FORMAT_XBGR16161616F:
4828 case DRM_FORMAT_ABGR16161616F:
4829 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4830 break;
4831 default:
4832 DRM_ERROR(
4833 "Unsupported screen format %p4cc\n",
4834 &fb->format->format);
4835 return -EINVAL;
4836 }
4837
4838 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4839 case DRM_MODE_ROTATE_0:
4840 plane_info->rotation = ROTATION_ANGLE_0;
4841 break;
4842 case DRM_MODE_ROTATE_90:
4843 plane_info->rotation = ROTATION_ANGLE_90;
4844 break;
4845 case DRM_MODE_ROTATE_180:
4846 plane_info->rotation = ROTATION_ANGLE_180;
4847 break;
4848 case DRM_MODE_ROTATE_270:
4849 plane_info->rotation = ROTATION_ANGLE_270;
4850 break;
4851 default:
4852 plane_info->rotation = ROTATION_ANGLE_0;
4853 break;
4854 }
4855
4856 plane_info->visible = true;
4857 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4858
4859 plane_info->layer_index = 0;
4860
4861 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4862 &plane_info->color_space);
4863 if (ret)
4864 return ret;
4865
4866 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4867 plane_info->rotation, tiling_flags,
4868 &plane_info->tiling_info,
4869 &plane_info->plane_size,
4870 &plane_info->dcc, address, tmz_surface,
4871 force_disable_dcc);
4872 if (ret)
4873 return ret;
4874
4875 fill_blending_from_plane_state(
4876 plane_state, &plane_info->per_pixel_alpha,
4877 &plane_info->global_alpha, &plane_info->global_alpha_value);
4878
4879 return 0;
4880 }
4881
4882 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4883 struct dc_plane_state *dc_plane_state,
4884 struct drm_plane_state *plane_state,
4885 struct drm_crtc_state *crtc_state)
4886 {
4887 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4888 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4889 struct dc_scaling_info scaling_info;
4890 struct dc_plane_info plane_info;
4891 int ret;
4892 bool force_disable_dcc = false;
4893
4894 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4895 if (ret)
4896 return ret;
4897
4898 dc_plane_state->src_rect = scaling_info.src_rect;
4899 dc_plane_state->dst_rect = scaling_info.dst_rect;
4900 dc_plane_state->clip_rect = scaling_info.clip_rect;
4901 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4902
4903 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4904 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4905 afb->tiling_flags,
4906 &plane_info,
4907 &dc_plane_state->address,
4908 afb->tmz_surface,
4909 force_disable_dcc);
4910 if (ret)
4911 return ret;
4912
4913 dc_plane_state->format = plane_info.format;
4914 dc_plane_state->color_space = plane_info.color_space;
4915 dc_plane_state->format = plane_info.format;
4916 dc_plane_state->plane_size = plane_info.plane_size;
4917 dc_plane_state->rotation = plane_info.rotation;
4918 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4919 dc_plane_state->stereo_format = plane_info.stereo_format;
4920 dc_plane_state->tiling_info = plane_info.tiling_info;
4921 dc_plane_state->visible = plane_info.visible;
4922 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4923 dc_plane_state->global_alpha = plane_info.global_alpha;
4924 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4925 dc_plane_state->dcc = plane_info.dcc;
4926 dc_plane_state->layer_index = plane_info.layer_index; // Always 0 for now
4927 dc_plane_state->flip_int_enabled = true;
4928
4929 /*
4930 * Always set input transfer function, since plane state is refreshed
4931 * every time.
4932 */
4933 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4934 if (ret)
4935 return ret;
4936
4937 return 0;
4938 }
4939
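/*
 * update_stream_scaling_settings - apply the connector scaling property
 *
 * Computes the stream source (viewport) and destination rectangles from
 * the selected mode and the RMX scaling type: full screen by default,
 * aspect-preserving letterbox/pillarbox for RMX_ASPECT/RMX_OFF, centered
 * 1:1 for RMX_CENTER, with optional underscan borders applied on top.
 */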
4940 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4941 const struct dm_connector_state *dm_state,
4942 struct dc_stream_state *stream)
4943 {
4944 enum amdgpu_rmx_type rmx_type;
4945
4946 struct rect src = { 0 }; /* viewport in composition space */
4947 struct rect dst = { 0 }; /* stream addressable area */
4948
4949 /* no mode. nothing to be done */
4950 if (!mode)
4951 return;
4952
4953 /* Full screen scaling by default */
4954 src.width = mode->hdisplay;
4955 src.height = mode->vdisplay;
4956 dst.width = stream->timing.h_addressable;
4957 dst.height = stream->timing.v_addressable;
4958
4959 if (dm_state) {
4960 rmx_type = dm_state->scaling;
4961 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4962 if (src.width * dst.height <
4963 src.height * dst.width) {
4964 /* height needs less upscaling/more downscaling */
4965 dst.width = src.width *
4966 dst.height / src.height;
4967 } else {
4968 /* width needs less upscaling/more downscaling */
4969 dst.height = src.height *
4970 dst.width / src.width;
4971 }
4972 } else if (rmx_type == RMX_CENTER) {
4973 dst = src;
4974 }
4975
4976 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4977 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4978
4979 if (dm_state->underscan_enable) {
4980 dst.x += dm_state->underscan_hborder / 2;
4981 dst.y += dm_state->underscan_vborder / 2;
4982 dst.width -= dm_state->underscan_hborder;
4983 dst.height -= dm_state->underscan_vborder;
4984 }
4985 }
4986
4987 stream->src = src;
4988 stream->dst = dst;
4989
4990 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4991 dst.x, dst.y, dst.width, dst.height);
4992
4993 }
4994
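/*
 * convert_color_depth_from_display_info - pick a color depth for the stream
 *
 * Starts from the sink's reported bpc (capped by the HDMI 2.0 HF-VSDB deep
 * color bits for YCbCr 4:2:0), clamps it to the user-requested max bpc and
 * rounds down to an even value, then maps the result to a DC color depth.
 * A bpc of 0 falls back to 8-bit as a workaround for pre-1.4 EDIDs.
 */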
4995 static enum dc_color_depth
4996 convert_color_depth_from_display_info(const struct drm_connector *connector,
4997 bool is_y420, int requested_bpc)
4998 {
4999 uint8_t bpc;
5000
5001 if (is_y420) {
5002 bpc = 8;
5003
5004 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5005 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5006 bpc = 16;
5007 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5008 bpc = 12;
5009 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5010 bpc = 10;
5011 } else {
5012 bpc = (uint8_t)connector->display_info.bpc;
5013 /* Assume 8 bpc by default if no bpc is specified. */
5014 bpc = bpc ? bpc : 8;
5015 }
5016
5017 if (requested_bpc > 0) {
5018 /*
5019 * Cap display bpc based on the user requested value.
5020 *
5021 * The value for state->max_bpc may not be correctly updated
5022 * depending on when the connector gets added to the state
5023 * or if this was called outside of atomic check, so it
5024 * can't be used directly.
5025 */
5026 bpc = min_t(u8, bpc, requested_bpc);
5027
5028 /* Round down to the nearest even number. */
5029 bpc = bpc - (bpc & 1);
5030 }
5031
5032 switch (bpc) {
5033 case 0:
5034 /*
5035 * Temporary workaround: DRM doesn't parse color depth for
5036 * EDID revisions before 1.4.
5037 * TODO: Fix edid parsing
5038 */
5039 return COLOR_DEPTH_888;
5040 case 6:
5041 return COLOR_DEPTH_666;
5042 case 8:
5043 return COLOR_DEPTH_888;
5044 case 10:
5045 return COLOR_DEPTH_101010;
5046 case 12:
5047 return COLOR_DEPTH_121212;
5048 case 14:
5049 return COLOR_DEPTH_141414;
5050 case 16:
5051 return COLOR_DEPTH_161616;
5052 default:
5053 return COLOR_DEPTH_UNDEFINED;
5054 }
5055 }
5056
5057 static enum dc_aspect_ratio
5058 get_aspect_ratio(const struct drm_display_mode *mode_in)
5059 {
5060 /* 1-1 mapping, since both enums follow the HDMI spec. */
5061 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5062 }
5063
5064 static enum dc_color_space
5065 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5066 {
5067 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5068
5069 switch (dc_crtc_timing->pixel_encoding) {
5070 case PIXEL_ENCODING_YCBCR422:
5071 case PIXEL_ENCODING_YCBCR444:
5072 case PIXEL_ENCODING_YCBCR420:
5073 {
5074 /*
5075 * 27030 kHz is the separation point between HDTV and SDTV.
5076 * According to the HDMI spec, we use YCbCr709 and YCbCr601
5077 * respectively.
5078 */
5079 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5080 if (dc_crtc_timing->flags.Y_ONLY)
5081 color_space =
5082 COLOR_SPACE_YCBCR709_LIMITED;
5083 else
5084 color_space = COLOR_SPACE_YCBCR709;
5085 } else {
5086 if (dc_crtc_timing->flags.Y_ONLY)
5087 color_space =
5088 COLOR_SPACE_YCBCR601_LIMITED;
5089 else
5090 color_space = COLOR_SPACE_YCBCR601;
5091 }
5092
5093 }
5094 break;
5095 case PIXEL_ENCODING_RGB:
5096 color_space = COLOR_SPACE_SRGB;
5097 break;
5098
5099 default:
5100 WARN_ON(1);
5101 break;
5102 }
5103
5104 return color_space;
5105 }
5106
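/*
 * adjust_colour_depth_from_display_info - fit the colour depth to TMDS limits
 *
 * Checks whether the stream's pixel clock, scaled up for the bandwidth
 * overhead of the current colour depth (and halved for YCbCr 4:2:0), fits
 * within the sink's max_tmds_clock; if not, the depth is reduced step by
 * step toward 8 bpc. Returns false when no HDMI-valid depth fits.
 */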
5107 static bool adjust_colour_depth_from_display_info(
5108 struct dc_crtc_timing *timing_out,
5109 const struct drm_display_info *info)
5110 {
5111 enum dc_color_depth depth = timing_out->display_color_depth;
5112 int normalized_clk;
5113 do {
5114 normalized_clk = timing_out->pix_clk_100hz / 10;
5115 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5116 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5117 normalized_clk /= 2;
5118 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
5119 switch (depth) {
5120 case COLOR_DEPTH_888:
5121 break;
5122 case COLOR_DEPTH_101010:
5123 normalized_clk = (normalized_clk * 30) / 24;
5124 break;
5125 case COLOR_DEPTH_121212:
5126 normalized_clk = (normalized_clk * 36) / 24;
5127 break;
5128 case COLOR_DEPTH_161616:
5129 normalized_clk = (normalized_clk * 48) / 24;
5130 break;
5131 default:
5132 /* The above depths are the only ones valid for HDMI. */
5133 return false;
5134 }
5135 if (normalized_clk <= info->max_tmds_clock) {
5136 timing_out->display_color_depth = depth;
5137 return true;
5138 }
5139 } while (--depth > COLOR_DEPTH_666);
5140 return false;
5141 }
5142
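/*
 * fill_stream_properties_from_drm_display_mode - program stream timing
 *
 * Converts a drm_display_mode and connector info into DC CRTC timing:
 * pixel encoding (RGB, YCbCr 4:4:4, or 4:2:0 when required or forced),
 * colour depth, CEA VIC / HDMI VIC, sync polarities and the
 * addressable/total/porch values. Freesync video modes take their timing
 * from the base mode fields rather than the crtc_* fields. For HDMI the
 * colour depth may be lowered, or the encoding switched to 4:2:0, to fit
 * within the sink's TMDS clock limit.
 */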
5143 static void fill_stream_properties_from_drm_display_mode(
5144 struct dc_stream_state *stream,
5145 const struct drm_display_mode *mode_in,
5146 const struct drm_connector *connector,
5147 const struct drm_connector_state *connector_state,
5148 const struct dc_stream_state *old_stream,
5149 int requested_bpc)
5150 {
5151 struct dc_crtc_timing *timing_out = &stream->timing;
5152 const struct drm_display_info *info = &connector->display_info;
5153 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5154 struct hdmi_vendor_infoframe hv_frame;
5155 struct hdmi_avi_infoframe avi_frame;
5156
5157 memset(&hv_frame, 0, sizeof(hv_frame));
5158 memset(&avi_frame, 0, sizeof(avi_frame));
5159
5160 timing_out->h_border_left = 0;
5161 timing_out->h_border_right = 0;
5162 timing_out->v_border_top = 0;
5163 timing_out->v_border_bottom = 0;
5164 /* TODO: un-hardcode */
5165 if (drm_mode_is_420_only(info, mode_in)
5166 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5167 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5168 else if (drm_mode_is_420_also(info, mode_in)
5169 && aconnector->force_yuv420_output)
5170 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5171 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5172 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5173 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5174 else
5175 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5176
5177 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5178 timing_out->display_color_depth = convert_color_depth_from_display_info(
5179 connector,
5180 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5181 requested_bpc);
5182 timing_out->scan_type = SCANNING_TYPE_NODATA;
5183 timing_out->hdmi_vic = 0;
5184
5185 if (old_stream) {
5186 timing_out->vic = old_stream->timing.vic;
5187 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5188 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5189 } else {
5190 timing_out->vic = drm_match_cea_mode(mode_in);
5191 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5192 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5193 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5194 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5195 }
5196
5197 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5198 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5199 timing_out->vic = avi_frame.video_code;
5200 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5201 timing_out->hdmi_vic = hv_frame.vic;
5202 }
5203
5204 if (is_freesync_video_mode(mode_in, aconnector)) {
5205 timing_out->h_addressable = mode_in->hdisplay;
5206 timing_out->h_total = mode_in->htotal;
5207 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5208 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5209 timing_out->v_total = mode_in->vtotal;
5210 timing_out->v_addressable = mode_in->vdisplay;
5211 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5212 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5213 timing_out->pix_clk_100hz = mode_in->clock * 10;
5214 } else {
5215 timing_out->h_addressable = mode_in->crtc_hdisplay;
5216 timing_out->h_total = mode_in->crtc_htotal;
5217 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5218 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5219 timing_out->v_total = mode_in->crtc_vtotal;
5220 timing_out->v_addressable = mode_in->crtc_vdisplay;
5221 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5222 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5223 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5224 }
5225
5226 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5227
5228 stream->output_color_space = get_output_color_space(timing_out);
5229
5230 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5231 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5232 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5233 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5234 drm_mode_is_420_also(info, mode_in) &&
5235 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5236 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5237 adjust_colour_depth_from_display_info(timing_out, info);
5238 }
5239 }
5240 }
5241
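/*
 * fill_audio_info - copy audio capabilities from the parsed EDID
 *
 * Copies manufacturer/product IDs, the display name and (for CEA revision
 * 3 or later) the supported audio modes from the DC EDID caps into the
 * stream's audio_info, along with the speaker allocation and the
 * progressive-mode video/audio latency values.
 */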
5242 static void fill_audio_info(struct audio_info *audio_info,
5243 const struct drm_connector *drm_connector,
5244 const struct dc_sink *dc_sink)
5245 {
5246 int i = 0;
5247 int cea_revision = 0;
5248 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5249
5250 audio_info->manufacture_id = edid_caps->manufacturer_id;
5251 audio_info->product_id = edid_caps->product_id;
5252
5253 cea_revision = drm_connector->display_info.cea_rev;
5254
5255 strscpy(audio_info->display_name,
5256 edid_caps->display_name,
5257 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5258
5259 if (cea_revision >= 3) {
5260 audio_info->mode_count = edid_caps->audio_mode_count;
5261
5262 for (i = 0; i < audio_info->mode_count; ++i) {
5263 audio_info->modes[i].format_code =
5264 (enum audio_format_code)
5265 (edid_caps->audio_modes[i].format_code);
5266 audio_info->modes[i].channel_count =
5267 edid_caps->audio_modes[i].channel_count;
5268 audio_info->modes[i].sample_rates.all =
5269 edid_caps->audio_modes[i].sample_rate;
5270 audio_info->modes[i].sample_size =
5271 edid_caps->audio_modes[i].sample_size;
5272 }
5273 }
5274
5275 audio_info->flags.all = edid_caps->speaker_flags;
5276
5277 /* TODO: We only check for the progressive mode, check for interlace mode too */
5278 if (drm_connector->latency_present[0]) {
5279 audio_info->video_latency = drm_connector->video_latency[0];
5280 audio_info->audio_latency = drm_connector->audio_latency[0];
5281 }
5282
5283 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5284
5285 }
5286
5287 static void
5288 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5289 struct drm_display_mode *dst_mode)
5290 {
5291 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5292 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5293 dst_mode->crtc_clock = src_mode->crtc_clock;
5294 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5295 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5296 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5297 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5298 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5299 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5300 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5301 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5302 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5303 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5304 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5305 }
5306
5307 static void
5308 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5309 const struct drm_display_mode *native_mode,
5310 bool scale_enabled)
5311 {
5312 if (scale_enabled) {
5313 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5314 } else if (native_mode->clock == drm_mode->clock &&
5315 native_mode->htotal == drm_mode->htotal &&
5316 native_mode->vtotal == drm_mode->vtotal) {
5317 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5318 } else {
5319 		/* no scaling and no amdgpu-inserted mode, nothing to patch */
5320 }
5321 }
5322
5323 static struct dc_sink *
5324 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5325 {
5326 struct dc_sink_init_data sink_init_data = { 0 };
5327 struct dc_sink *sink = NULL;
5328 sink_init_data.link = aconnector->dc_link;
5329 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5330
5331 sink = dc_sink_create(&sink_init_data);
5332 if (!sink) {
5333 DRM_ERROR("Failed to create sink!\n");
5334 return NULL;
5335 }
5336 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5337
5338 return sink;
5339 }
5340
5341 static void set_multisync_trigger_params(
5342 struct dc_stream_state *stream)
5343 {
5344 struct dc_stream_state *master = NULL;
5345
5346 if (stream->triggered_crtc_reset.enabled) {
5347 master = stream->triggered_crtc_reset.event_source;
5348 stream->triggered_crtc_reset.event =
5349 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5350 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5351 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5352 }
5353 }
5354
5355 static void set_master_stream(struct dc_stream_state *stream_set[],
5356 int stream_count)
5357 {
5358 int j, highest_rfr = 0, master_stream = 0;
5359
5360 for (j = 0; j < stream_count; j++) {
5361 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5362 int refresh_rate = 0;
5363
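			/*
			 * pix_clk_100hz is in units of 100 Hz, so multiplying by 100
			 * gives Hz; dividing by the total pixels per frame
			 * (h_total * v_total) yields the refresh rate in Hz.
			 */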
5364 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5365 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5366 if (refresh_rate > highest_rfr) {
5367 highest_rfr = refresh_rate;
5368 master_stream = j;
5369 }
5370 }
5371 }
5372 for (j = 0; j < stream_count; j++) {
5373 if (stream_set[j])
5374 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5375 }
5376 }
5377
5378 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5379 {
5380 int i = 0;
5381 struct dc_stream_state *stream;
5382
5383 if (context->stream_count < 2)
5384 return;
5385 for (i = 0; i < context->stream_count ; i++) {
5386 if (!context->streams[i])
5387 continue;
5388 /*
5389 * TODO: add a function to read AMD VSDB bits and set
5390 * crtc_sync_master.multi_sync_enabled flag
5391 * For now it's set to false
5392 */
5393 }
5394
5395 set_master_stream(context->streams, context->stream_count);
5396
5397 for (i = 0; i < context->stream_count ; i++) {
5398 stream = context->streams[i];
5399
5400 if (!stream)
5401 continue;
5402
5403 set_multisync_trigger_params(stream);
5404 }
5405 }
5406
5407 static struct drm_display_mode *
5408 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5409 bool use_probed_modes)
5410 {
5411 struct drm_display_mode *m, *m_pref = NULL;
5412 u16 current_refresh, highest_refresh;
5413 struct list_head *list_head = use_probed_modes ?
5414 &aconnector->base.probed_modes :
5415 &aconnector->base.modes;
5416
5417 if (aconnector->freesync_vid_base.clock != 0)
5418 return &aconnector->freesync_vid_base;
5419
5420 /* Find the preferred mode */
5421 list_for_each_entry (m, list_head, head) {
5422 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5423 m_pref = m;
5424 break;
5425 }
5426 }
5427
5428 if (!m_pref) {
5429 		/* Probably an EDID with no preferred mode. Fall back to the first entry */
5430 m_pref = list_first_entry_or_null(
5431 &aconnector->base.modes, struct drm_display_mode, head);
5432 if (!m_pref) {
5433 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5434 return NULL;
5435 }
5436 }
5437
5438 highest_refresh = drm_mode_vrefresh(m_pref);
5439
5440 /*
5441 * Find the mode with highest refresh rate with same resolution.
5442 * For some monitors, preferred mode is not the mode with highest
5443 * supported refresh rate.
5444 */
5445 list_for_each_entry (m, list_head, head) {
5446 current_refresh = drm_mode_vrefresh(m);
5447
5448 if (m->hdisplay == m_pref->hdisplay &&
5449 m->vdisplay == m_pref->vdisplay &&
5450 highest_refresh < current_refresh) {
5451 highest_refresh = current_refresh;
5452 m_pref = m;
5453 }
5454 }
5455
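	/* Cache the result so later calls can return it without re-walking the mode list. */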
5456 aconnector->freesync_vid_base = *m_pref;
5457 return m_pref;
5458 }
5459
5460 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5461 struct amdgpu_dm_connector *aconnector)
5462 {
5463 struct drm_display_mode *high_mode;
5464 int timing_diff;
5465
5466 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5467 if (!high_mode || !mode)
5468 return false;
5469
5470 timing_diff = high_mode->vtotal - mode->vtotal;
5471
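	/*
	 * A freesync video mode differs from the base mode only in the length
	 * of the vertical front porch: every vertical sync parameter below must
	 * be offset by exactly the same vtotal delta computed above.
	 */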
5472 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5473 high_mode->hdisplay != mode->hdisplay ||
5474 high_mode->vdisplay != mode->vdisplay ||
5475 high_mode->hsync_start != mode->hsync_start ||
5476 high_mode->hsync_end != mode->hsync_end ||
5477 high_mode->htotal != mode->htotal ||
5478 high_mode->hskew != mode->hskew ||
5479 high_mode->vscan != mode->vscan ||
5480 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5481 high_mode->vsync_end - mode->vsync_end != timing_diff)
5482 return false;
5483 else
5484 return true;
5485 }
5486
5487 static struct dc_stream_state *
5488 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5489 const struct drm_display_mode *drm_mode,
5490 const struct dm_connector_state *dm_state,
5491 const struct dc_stream_state *old_stream,
5492 int requested_bpc)
5493 {
5494 struct drm_display_mode *preferred_mode = NULL;
5495 struct drm_connector *drm_connector;
5496 const struct drm_connector_state *con_state =
5497 dm_state ? &dm_state->base : NULL;
5498 struct dc_stream_state *stream = NULL;
5499 struct drm_display_mode mode = *drm_mode;
5500 struct drm_display_mode saved_mode;
5501 struct drm_display_mode *freesync_mode = NULL;
5502 bool native_mode_found = false;
5503 bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5504 int mode_refresh;
5505 int preferred_refresh = 0;
5506 #if defined(CONFIG_DRM_AMD_DC_DCN)
5507 struct dsc_dec_dpcd_caps dsc_caps;
5508 uint32_t link_bandwidth_kbps;
5509 #endif
5510 struct dc_sink *sink = NULL;
5511
5512 memset(&saved_mode, 0, sizeof(saved_mode));
5513
5514 if (aconnector == NULL) {
5515 DRM_ERROR("aconnector is NULL!\n");
5516 return stream;
5517 }
5518
5519 drm_connector = &aconnector->base;
5520
5521 if (!aconnector->dc_sink) {
5522 sink = create_fake_sink(aconnector);
5523 if (!sink)
5524 return stream;
5525 } else {
5526 sink = aconnector->dc_sink;
5527 dc_sink_retain(sink);
5528 }
5529
5530 stream = dc_create_stream_for_sink(sink);
5531
5532 if (stream == NULL) {
5533 DRM_ERROR("Failed to create stream for sink!\n");
5534 goto finish;
5535 }
5536
5537 stream->dm_stream_context = aconnector;
5538
5539 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5540 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5541
5542 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5543 /* Search for preferred mode */
5544 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5545 native_mode_found = true;
5546 break;
5547 }
5548 }
5549 if (!native_mode_found)
5550 preferred_mode = list_first_entry_or_null(
5551 &aconnector->base.modes,
5552 struct drm_display_mode,
5553 head);
5554
5555 mode_refresh = drm_mode_vrefresh(&mode);
5556
5557 if (preferred_mode == NULL) {
5558 /*
5562 		 * This may not be an error: the use case is when we have no
5563 		 * usermode calls to reset and set mode upon hotplug. In this
5564 		 * case, we call set mode ourselves to restore the previous mode,
5565 		 * and the mode list may not be filled in yet.
5563 */
5564 DRM_DEBUG_DRIVER("No preferred mode found\n");
5565 } else {
5566 recalculate_timing |= amdgpu_freesync_vid_mode &&
5567 is_freesync_video_mode(&mode, aconnector);
5568 if (recalculate_timing) {
5569 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5570 saved_mode = mode;
5571 mode = *freesync_mode;
5572 } else {
5573 decide_crtc_timing_for_drm_display_mode(
5574 &mode, preferred_mode,
5575 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5576 }
5577
5578 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5579 }
5580
5581 if (recalculate_timing)
5582 drm_mode_set_crtcinfo(&saved_mode, 0);
5583 else if (!dm_state)
5584 drm_mode_set_crtcinfo(&mode, 0);
5585
5586 /*
5587 	 * If scaling is enabled and the refresh rate didn't change,
5588 	 * we copy the VIC and polarities of the old timings.
5589 */
5590 if (!recalculate_timing || mode_refresh != preferred_refresh)
5591 fill_stream_properties_from_drm_display_mode(
5592 stream, &mode, &aconnector->base, con_state, NULL,
5593 requested_bpc);
5594 else
5595 fill_stream_properties_from_drm_display_mode(
5596 stream, &mode, &aconnector->base, con_state, old_stream,
5597 requested_bpc);
5598
5599 stream->timing.flags.DSC = 0;
5600
5601 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5602 #if defined(CONFIG_DRM_AMD_DC_DCN)
5603 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5604 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5605 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5606 &dsc_caps);
5607 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5608 dc_link_get_link_cap(aconnector->dc_link));
5609
5610 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5611 /* Set DSC policy according to dsc_clock_en */
5612 dc_dsc_policy_set_enable_dsc_when_not_needed(
5613 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5614
5615 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5616 &dsc_caps,
5617 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5618 0,
5619 link_bandwidth_kbps,
5620 &stream->timing,
5621 &stream->timing.dsc_cfg))
5622 stream->timing.flags.DSC = 1;
5623 /* Overwrite the stream flag if DSC is enabled through debugfs */
5624 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5625 stream->timing.flags.DSC = 1;
5626
5627 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5628 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5629
5630 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5631 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5632
5633 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5634 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5635 }
5636 #endif
5637 }
5638
5639 update_stream_scaling_settings(&mode, dm_state, stream);
5640
5641 fill_audio_info(
5642 &stream->audio_info,
5643 drm_connector,
5644 sink);
5645
5646 update_stream_signal(stream, sink);
5647
5648 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5649 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5650
5651 if (stream->link->psr_settings.psr_feature_enabled) {
5652 //
5653 		// decide whether the stream supports VSC SDP colorimetry capability
5654 		// before building the VSC info packet
5655 //
5656 stream->use_vsc_sdp_for_colorimetry = false;
5657 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5658 stream->use_vsc_sdp_for_colorimetry =
5659 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5660 } else {
5661 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5662 stream->use_vsc_sdp_for_colorimetry = true;
5663 }
5664 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5665 }
5666 finish:
5667 dc_sink_release(sink);
5668
5669 return stream;
5670 }
5671
5672 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5673 {
5674 drm_crtc_cleanup(crtc);
5675 kfree(crtc);
5676 }
5677
5678 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5679 struct drm_crtc_state *state)
5680 {
5681 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5682
5683 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
5684 if (cur->stream)
5685 dc_stream_release(cur->stream);
5686
5687
5688 __drm_atomic_helper_crtc_destroy_state(state);
5689
5690
5691 kfree(state);
5692 }
5693
5694 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5695 {
5696 struct dm_crtc_state *state;
5697
5698 if (crtc->state)
5699 dm_crtc_destroy_state(crtc, crtc->state);
5700
5701 state = kzalloc(sizeof(*state), GFP_KERNEL);
5702 if (WARN_ON(!state))
5703 return;
5704
5705 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5706 }
5707
5708 static struct drm_crtc_state *
5709 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5710 {
5711 struct dm_crtc_state *state, *cur;
5712
5713 cur = to_dm_crtc_state(crtc->state);
5714
5715 if (WARN_ON(!crtc->state))
5716 return NULL;
5717
5718 state = kzalloc(sizeof(*state), GFP_KERNEL);
5719 if (!state)
5720 return NULL;
5721
5722 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5723
5724 if (cur->stream) {
5725 state->stream = cur->stream;
5726 dc_stream_retain(state->stream);
5727 }
5728
5729 state->active_planes = cur->active_planes;
5730 state->vrr_infopacket = cur->vrr_infopacket;
5731 state->abm_level = cur->abm_level;
5732 state->vrr_supported = cur->vrr_supported;
5733 state->freesync_config = cur->freesync_config;
5734 state->cm_has_degamma = cur->cm_has_degamma;
5735 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5736 	/* TODO: Duplicate dc_stream once the stream object is flattened */
5737
5738 return &state->base;
5739 }
5740
5741 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5742 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5743 {
5744 crtc_debugfs_init(crtc);
5745
5746 return 0;
5747 }
5748 #endif
5749
5750 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5751 {
5752 enum dc_irq_source irq_source;
5753 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5754 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5755 int rc;
5756
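	/* Each OTG instance has its own VUPDATE interrupt source, offset from the base IRQ type by the OTG index. */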
5757 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5758
5759 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5760
5761 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5762 acrtc->crtc_id, enable ? "en" : "dis", rc);
5763 return rc;
5764 }
5765
5766 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5767 {
5768 enum dc_irq_source irq_source;
5769 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5770 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5771 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5772 #if defined(CONFIG_DRM_AMD_DC_DCN)
5773 struct amdgpu_display_manager *dm = &adev->dm;
5774 unsigned long flags;
5775 #endif
5776 int rc = 0;
5777
5778 if (enable) {
5779 /* vblank irq on -> Only need vupdate irq in vrr mode */
5780 if (amdgpu_dm_vrr_active(acrtc_state))
5781 rc = dm_set_vupdate_irq(crtc, true);
5782 } else {
5783 /* vblank irq off -> vupdate irq off */
5784 rc = dm_set_vupdate_irq(crtc, false);
5785 }
5786
5787 if (rc)
5788 return rc;
5789
5790 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5791
5792 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5793 return -EBUSY;
5794
5795 if (amdgpu_in_reset(adev))
5796 return 0;
5797
5798 #if defined(CONFIG_DRM_AMD_DC_DCN)
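	/*
	 * Hand the new vblank state off to the deferred worker (mall_work)
	 * rather than applying it directly in this context.
	 */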
5799 spin_lock_irqsave(&dm->vblank_lock, flags);
5800 dm->vblank_workqueue->dm = dm;
5801 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5802 dm->vblank_workqueue->enable = enable;
5803 spin_unlock_irqrestore(&dm->vblank_lock, flags);
5804 schedule_work(&dm->vblank_workqueue->mall_work);
5805 #endif
5806
5807 return 0;
5808 }
5809
5810 static int dm_enable_vblank(struct drm_crtc *crtc)
5811 {
5812 return dm_set_vblank(crtc, true);
5813 }
5814
5815 static void dm_disable_vblank(struct drm_crtc *crtc)
5816 {
5817 dm_set_vblank(crtc, false);
5818 }
5819
5820 /* Only the options currently available for the driver are implemented */
5821 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5822 .reset = dm_crtc_reset_state,
5823 .destroy = amdgpu_dm_crtc_destroy,
5824 .set_config = drm_atomic_helper_set_config,
5825 .page_flip = drm_atomic_helper_page_flip,
5826 .atomic_duplicate_state = dm_crtc_duplicate_state,
5827 .atomic_destroy_state = dm_crtc_destroy_state,
5828 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5829 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5830 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5831 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5832 .enable_vblank = dm_enable_vblank,
5833 .disable_vblank = dm_disable_vblank,
5834 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5835 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5836 .late_register = amdgpu_dm_crtc_late_register,
5837 #endif
5838 };
5839
5840 static enum drm_connector_status
5841 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5842 {
5843 bool connected;
5844 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5845
5846 /*
5847 * Notes:
5848 * 1. This interface is NOT called in context of HPD irq.
5852 	 * 2. This interface *is called* in context of user-mode ioctl, which
5853 	 * makes it a bad place for *any* MST-related activity.
5851 */
5852
5853 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5854 !aconnector->fake_enable)
5855 connected = (aconnector->dc_sink != NULL);
5856 else
5857 connected = (aconnector->base.force == DRM_FORCE_ON);
5858
5859 update_subconnector_property(aconnector);
5860
5861 return (connected ? connector_status_connected :
5862 connector_status_disconnected);
5863 }
5864
5865 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5866 struct drm_connector_state *connector_state,
5867 struct drm_property *property,
5868 uint64_t val)
5869 {
5870 struct drm_device *dev = connector->dev;
5871 struct amdgpu_device *adev = drm_to_adev(dev);
5872 struct dm_connector_state *dm_old_state =
5873 to_dm_connector_state(connector->state);
5874 struct dm_connector_state *dm_new_state =
5875 to_dm_connector_state(connector_state);
5876
5877 int ret = -EINVAL;
5878
5879 if (property == dev->mode_config.scaling_mode_property) {
5880 enum amdgpu_rmx_type rmx_type;
5881
5882 switch (val) {
5883 case DRM_MODE_SCALE_CENTER:
5884 rmx_type = RMX_CENTER;
5885 break;
5886 case DRM_MODE_SCALE_ASPECT:
5887 rmx_type = RMX_ASPECT;
5888 break;
5889 case DRM_MODE_SCALE_FULLSCREEN:
5890 rmx_type = RMX_FULL;
5891 break;
5892 case DRM_MODE_SCALE_NONE:
5893 default:
5894 rmx_type = RMX_OFF;
5895 break;
5896 }
5897
5898 if (dm_old_state->scaling == rmx_type)
5899 return 0;
5900
5901 dm_new_state->scaling = rmx_type;
5902 ret = 0;
5903 } else if (property == adev->mode_info.underscan_hborder_property) {
5904 dm_new_state->underscan_hborder = val;
5905 ret = 0;
5906 } else if (property == adev->mode_info.underscan_vborder_property) {
5907 dm_new_state->underscan_vborder = val;
5908 ret = 0;
5909 } else if (property == adev->mode_info.underscan_property) {
5910 dm_new_state->underscan_enable = val;
5911 ret = 0;
5912 } else if (property == adev->mode_info.abm_level_property) {
5913 dm_new_state->abm_level = val;
5914 ret = 0;
5915 }
5916
5917 return ret;
5918 }
5919
5920 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5921 const struct drm_connector_state *state,
5922 struct drm_property *property,
5923 uint64_t *val)
5924 {
5925 struct drm_device *dev = connector->dev;
5926 struct amdgpu_device *adev = drm_to_adev(dev);
5927 struct dm_connector_state *dm_state =
5928 to_dm_connector_state(state);
5929 int ret = -EINVAL;
5930
5931 if (property == dev->mode_config.scaling_mode_property) {
5932 switch (dm_state->scaling) {
5933 case RMX_CENTER:
5934 *val = DRM_MODE_SCALE_CENTER;
5935 break;
5936 case RMX_ASPECT:
5937 *val = DRM_MODE_SCALE_ASPECT;
5938 break;
5939 case RMX_FULL:
5940 *val = DRM_MODE_SCALE_FULLSCREEN;
5941 break;
5942 case RMX_OFF:
5943 default:
5944 *val = DRM_MODE_SCALE_NONE;
5945 break;
5946 }
5947 ret = 0;
5948 } else if (property == adev->mode_info.underscan_hborder_property) {
5949 *val = dm_state->underscan_hborder;
5950 ret = 0;
5951 } else if (property == adev->mode_info.underscan_vborder_property) {
5952 *val = dm_state->underscan_vborder;
5953 ret = 0;
5954 } else if (property == adev->mode_info.underscan_property) {
5955 *val = dm_state->underscan_enable;
5956 ret = 0;
5957 } else if (property == adev->mode_info.abm_level_property) {
5958 *val = dm_state->abm_level;
5959 ret = 0;
5960 }
5961
5962 return ret;
5963 }
5964
5965 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5966 {
5967 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5968
5969 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5970 }
5971
5972 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5973 {
5974 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5975 const struct dc_link *link = aconnector->dc_link;
5976 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5977 struct amdgpu_display_manager *dm = &adev->dm;
5978
5979 /*
5980 	 * Call only if mst_mgr was initialized before, since it's not done
5981 * for all connector types.
5982 */
5983 if (aconnector->mst_mgr.dev)
5984 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5985
5986 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5987 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5988
5989 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5990 link->type != dc_connection_none &&
5991 dm->backlight_dev) {
5992 backlight_device_unregister(dm->backlight_dev);
5993 dm->backlight_dev = NULL;
5994 }
5995 #endif
5996
5997 if (aconnector->dc_em_sink)
5998 dc_sink_release(aconnector->dc_em_sink);
5999 aconnector->dc_em_sink = NULL;
6000 if (aconnector->dc_sink)
6001 dc_sink_release(aconnector->dc_sink);
6002 aconnector->dc_sink = NULL;
6003
6004 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6005 drm_connector_unregister(connector);
6006 drm_connector_cleanup(connector);
6007 if (aconnector->i2c) {
6008 i2c_del_adapter(&aconnector->i2c->base);
6009 kfree(aconnector->i2c);
6010 }
6011 kfree(aconnector->dm_dp_aux.aux.name);
6012
6013 kfree(connector);
6014 }
6015
6016 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6017 {
6018 struct dm_connector_state *state =
6019 to_dm_connector_state(connector->state);
6020
6021 if (connector->state)
6022 __drm_atomic_helper_connector_destroy_state(connector->state);
6023
6024 kfree(state);
6025
6026 state = kzalloc(sizeof(*state), GFP_KERNEL);
6027
6028 if (state) {
6029 state->scaling = RMX_OFF;
6030 state->underscan_enable = false;
6031 state->underscan_hborder = 0;
6032 state->underscan_vborder = 0;
6033 state->base.max_requested_bpc = 8;
6034 state->vcpi_slots = 0;
6035 state->pbn = 0;
6036 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6037 state->abm_level = amdgpu_dm_abm_level;
6038
6039 __drm_atomic_helper_connector_reset(connector, &state->base);
6040 }
6041 }
6042
6043 struct drm_connector_state *
6044 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6045 {
6046 struct dm_connector_state *state =
6047 to_dm_connector_state(connector->state);
6048
6049 struct dm_connector_state *new_state =
6050 kmemdup(state, sizeof(*state), GFP_KERNEL);
6051
6052 if (!new_state)
6053 return NULL;
6054
6055 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6056
6057 new_state->freesync_capable = state->freesync_capable;
6058 new_state->abm_level = state->abm_level;
6059 new_state->scaling = state->scaling;
6060 new_state->underscan_enable = state->underscan_enable;
6061 new_state->underscan_hborder = state->underscan_hborder;
6062 new_state->underscan_vborder = state->underscan_vborder;
6063 new_state->vcpi_slots = state->vcpi_slots;
6064 new_state->pbn = state->pbn;
6065 return &new_state->base;
6066 }
6067
6068 static int
6069 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6070 {
6071 struct amdgpu_dm_connector *amdgpu_dm_connector =
6072 to_amdgpu_dm_connector(connector);
6073 int r;
6074
6075 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6076 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6077 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6078 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6079 if (r)
6080 return r;
6081 }
6082
6083 #if defined(CONFIG_DEBUG_FS)
6084 connector_debugfs_init(amdgpu_dm_connector);
6085 #endif
6086
6087 return 0;
6088 }
6089
6090 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6091 .reset = amdgpu_dm_connector_funcs_reset,
6092 .detect = amdgpu_dm_connector_detect,
6093 .fill_modes = drm_helper_probe_single_connector_modes,
6094 .destroy = amdgpu_dm_connector_destroy,
6095 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6096 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6097 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6098 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6099 .late_register = amdgpu_dm_connector_late_register,
6100 .early_unregister = amdgpu_dm_connector_unregister
6101 };
6102
6103 static int get_modes(struct drm_connector *connector)
6104 {
6105 return amdgpu_dm_connector_get_modes(connector);
6106 }
6107
6108 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6109 {
6110 struct dc_sink_init_data init_params = {
6111 .link = aconnector->dc_link,
6112 .sink_signal = SIGNAL_TYPE_VIRTUAL
6113 };
6114 struct edid *edid;
6115
6116 if (!aconnector->base.edid_blob_ptr) {
6117 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6118 aconnector->base.name);
6119
6120 aconnector->base.force = DRM_FORCE_OFF;
6121 aconnector->base.override_edid = false;
6122 return;
6123 }
6124
6125 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6126
6127 aconnector->edid = edid;
6128
6129 aconnector->dc_em_sink = dc_link_add_remote_sink(
6130 aconnector->dc_link,
6131 (uint8_t *)edid,
6132 (edid->extensions + 1) * EDID_LENGTH,
6133 &init_params);
6134
6135 if (aconnector->base.force == DRM_FORCE_ON) {
6136 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6137 aconnector->dc_link->local_sink :
6138 aconnector->dc_em_sink;
6139 dc_sink_retain(aconnector->dc_sink);
6140 }
6141 }
6142
6143 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6144 {
6145 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6146
6147 /*
6148 	 * In case of headless boot with force on for a DP managed connector,
6149 	 * those settings have to be != 0 to get an initial modeset.
6150 */
6151 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6152 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6153 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6154 }
6155
6156
6157 aconnector->base.override_edid = true;
6158 create_eml_sink(aconnector);
6159 }
6160
6161 static struct dc_stream_state *
6162 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6163 const struct drm_display_mode *drm_mode,
6164 const struct dm_connector_state *dm_state,
6165 const struct dc_stream_state *old_stream)
6166 {
6167 struct drm_connector *connector = &aconnector->base;
6168 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6169 struct dc_stream_state *stream;
6170 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6171 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6172 enum dc_status dc_result = DC_OK;
6173
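	/*
	 * Retry stream creation with progressively lower bpc (down to 6) until
	 * DC validation of the stream succeeds.
	 */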
6174 do {
6175 stream = create_stream_for_sink(aconnector, drm_mode,
6176 dm_state, old_stream,
6177 requested_bpc);
6178 if (stream == NULL) {
6179 DRM_ERROR("Failed to create stream for sink!\n");
6180 break;
6181 }
6182
6183 dc_result = dc_validate_stream(adev->dm.dc, stream);
6184
6185 if (dc_result != DC_OK) {
6186 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6187 drm_mode->hdisplay,
6188 drm_mode->vdisplay,
6189 drm_mode->clock,
6190 dc_result,
6191 dc_status_to_str(dc_result));
6192
6193 dc_stream_release(stream);
6194 stream = NULL;
6195 requested_bpc -= 2; /* lower bpc to retry validation */
6196 }
6197
6198 } while (stream == NULL && requested_bpc >= 6);
6199
6200 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6201 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6202
6203 aconnector->force_yuv420_output = true;
6204 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6205 dm_state, old_stream);
6206 aconnector->force_yuv420_output = false;
6207 }
6208
6209 return stream;
6210 }
6211
6212 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6213 struct drm_display_mode *mode)
6214 {
6215 int result = MODE_ERROR;
6216 struct dc_sink *dc_sink;
6217 /* TODO: Unhardcode stream count */
6218 struct dc_stream_state *stream;
6219 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6220
6221 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6222 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6223 return result;
6224
6225 /*
6226 	 * Only run this the first time mode_valid is called to initialize
6227 * EDID mgmt
6228 */
6229 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6230 !aconnector->dc_em_sink)
6231 handle_edid_mgmt(aconnector);
6232
6233 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6234
6235 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6236 aconnector->base.force != DRM_FORCE_ON) {
6237 DRM_ERROR("dc_sink is NULL!\n");
6238 goto fail;
6239 }
6240
6241 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6242 if (stream) {
6243 dc_stream_release(stream);
6244 result = MODE_OK;
6245 }
6246
6247 fail:
6248 	/* TODO: error handling */
6249 return result;
6250 }
6251
6252 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6253 struct dc_info_packet *out)
6254 {
6255 struct hdmi_drm_infoframe frame;
6256 unsigned char buf[30]; /* 26 + 4 */
6257 ssize_t len;
6258 int ret, i;
6259
6260 memset(out, 0, sizeof(*out));
6261
6262 if (!state->hdr_output_metadata)
6263 return 0;
6264
6265 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6266 if (ret)
6267 return ret;
6268
6269 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6270 if (len < 0)
6271 return (int)len;
6272
6273 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6274 if (len != 30)
6275 return -EINVAL;
6276
6277 /* Prepare the infopacket for DC. */
6278 switch (state->connector->connector_type) {
6279 case DRM_MODE_CONNECTOR_HDMIA:
6280 out->hb0 = 0x87; /* type */
6281 out->hb1 = 0x01; /* version */
6282 out->hb2 = 0x1A; /* length */
6283 out->sb[0] = buf[3]; /* checksum */
6284 i = 1;
6285 break;
6286
6287 case DRM_MODE_CONNECTOR_DisplayPort:
6288 case DRM_MODE_CONNECTOR_eDP:
6289 out->hb0 = 0x00; /* sdp id, zero */
6290 out->hb1 = 0x87; /* type */
6291 out->hb2 = 0x1D; /* payload len - 1 */
6292 out->hb3 = (0x13 << 2); /* sdp version */
6293 out->sb[0] = 0x01; /* version */
6294 out->sb[1] = 0x1A; /* length */
6295 i = 2;
6296 break;
6297
6298 default:
6299 return -EINVAL;
6300 }
6301
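	/* Copy the 26-byte static metadata payload, skipping the 4-byte infoframe header in buf. */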
6302 memcpy(&out->sb[i], &buf[4], 26);
6303 out->valid = true;
6304
6305 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6306 sizeof(out->sb), false);
6307
6308 return 0;
6309 }
6310
6311 static int
6312 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6313 struct drm_atomic_state *state)
6314 {
6315 struct drm_connector_state *new_con_state =
6316 drm_atomic_get_new_connector_state(state, conn);
6317 struct drm_connector_state *old_con_state =
6318 drm_atomic_get_old_connector_state(state, conn);
6319 struct drm_crtc *crtc = new_con_state->crtc;
6320 struct drm_crtc_state *new_crtc_state;
6321 int ret;
6322
6323 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6324
6325 if (!crtc)
6326 return 0;
6327
6328 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6329 struct dc_info_packet hdr_infopacket;
6330
6331 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6332 if (ret)
6333 return ret;
6334
6335 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6336 if (IS_ERR(new_crtc_state))
6337 return PTR_ERR(new_crtc_state);
6338
6339 /*
6340 * DC considers the stream backends changed if the
6341 * static metadata changes. Forcing the modeset also
6342 * gives a simple way for userspace to switch from
6343 * 8bpc to 10bpc when setting the metadata to enter
6344 * or exit HDR.
6345 *
6346 * Changing the static metadata after it's been
6347 * set is permissible, however. So only force a
6348 * modeset if we're entering or exiting HDR.
6349 */
6350 new_crtc_state->mode_changed =
6351 !old_con_state->hdr_output_metadata ||
6352 !new_con_state->hdr_output_metadata;
6353 }
6354
6355 return 0;
6356 }
6357
6358 static const struct drm_connector_helper_funcs
6359 amdgpu_dm_connector_helper_funcs = {
6360 /*
6361 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6362 * modes will be filtered by drm_mode_validate_size(), and those modes
6363 	 * are missing after the user starts lightdm. So we need to renew the modes
6364 	 * list in the get_modes callback, not just return the modes count.
6365 */
6366 .get_modes = get_modes,
6367 .mode_valid = amdgpu_dm_connector_mode_valid,
6368 .atomic_check = amdgpu_dm_connector_atomic_check,
6369 };
6370
6371 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6372 {
6373 }
6374
6375 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6376 {
6377 struct drm_atomic_state *state = new_crtc_state->state;
6378 struct drm_plane *plane;
6379 int num_active = 0;
6380
6381 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6382 struct drm_plane_state *new_plane_state;
6383
6384 /* Cursor planes are "fake". */
6385 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6386 continue;
6387
6388 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6389
6390 if (!new_plane_state) {
6391 /*
6392 * The plane is enable on the CRTC and hasn't changed
6393 * state. This means that it previously passed
6394 * validation and is therefore enabled.
6395 */
6396 num_active += 1;
6397 continue;
6398 }
6399
6400 /* We need a framebuffer to be considered enabled. */
6401 num_active += (new_plane_state->fb != NULL);
6402 }
6403
6404 return num_active;
6405 }
6406
6407 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6408 struct drm_crtc_state *new_crtc_state)
6409 {
6410 struct dm_crtc_state *dm_new_crtc_state =
6411 to_dm_crtc_state(new_crtc_state);
6412
6413 dm_new_crtc_state->active_planes = 0;
6414
6415 if (!dm_new_crtc_state->stream)
6416 return;
6417
6418 dm_new_crtc_state->active_planes =
6419 count_crtc_active_planes(new_crtc_state);
6420 }
6421
6422 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6423 struct drm_atomic_state *state)
6424 {
6425 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6426 crtc);
6427 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6428 struct dc *dc = adev->dm.dc;
6429 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6430 int ret = -EINVAL;
6431
6432 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6433
6434 dm_update_crtc_active_planes(crtc, crtc_state);
6435
6436 if (unlikely(!dm_crtc_state->stream &&
6437 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6438 WARN_ON(1);
6439 return ret;
6440 }
6441
6442 /*
6443 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6444 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6445 * planes are disabled, which is not supported by the hardware. And there is legacy
6446 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6447 */
6448 if (crtc_state->enable &&
6449 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6450 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6451 return -EINVAL;
6452 }
6453
6454 /* In some use cases, like reset, no stream is attached */
6455 if (!dm_crtc_state->stream)
6456 return 0;
6457
6458 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6459 return 0;
6460
6461 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6462 return ret;
6463 }
6464
6465 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6466 const struct drm_display_mode *mode,
6467 struct drm_display_mode *adjusted_mode)
6468 {
6469 return true;
6470 }
6471
6472 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6473 .disable = dm_crtc_helper_disable,
6474 .atomic_check = dm_crtc_helper_atomic_check,
6475 .mode_fixup = dm_crtc_helper_mode_fixup,
6476 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6477 };
6478
6479 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6480 {
6481
6482 }
6483
6484 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6485 {
6486 switch (display_color_depth) {
6487 case COLOR_DEPTH_666:
6488 return 6;
6489 case COLOR_DEPTH_888:
6490 return 8;
6491 case COLOR_DEPTH_101010:
6492 return 10;
6493 case COLOR_DEPTH_121212:
6494 return 12;
6495 case COLOR_DEPTH_141414:
6496 return 14;
6497 case COLOR_DEPTH_161616:
6498 return 16;
6499 default:
6500 break;
6501 }
6502 return 0;
6503 }
6504
6505 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6506 struct drm_crtc_state *crtc_state,
6507 struct drm_connector_state *conn_state)
6508 {
6509 struct drm_atomic_state *state = crtc_state->state;
6510 struct drm_connector *connector = conn_state->connector;
6511 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6512 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6513 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6514 struct drm_dp_mst_topology_mgr *mst_mgr;
6515 struct drm_dp_mst_port *mst_port;
6516 enum dc_color_depth color_depth;
6517 int clock, bpp = 0;
6518 bool is_y420 = false;
6519
6520 if (!aconnector->port || !aconnector->dc_sink)
6521 return 0;
6522
6523 mst_port = aconnector->port;
6524 mst_mgr = &aconnector->mst_port->mst_mgr;
6525
6526 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6527 return 0;
6528
6529 if (!state->duplicated) {
6530 int max_bpc = conn_state->max_requested_bpc;
6531 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6532 aconnector->force_yuv420_output;
6533 color_depth = convert_color_depth_from_display_info(connector,
6534 is_y420,
6535 max_bpc);
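		/* 3 colour components per pixel, so bits per pixel = 3 * bits per component. */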
6536 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6537 clock = adjusted_mode->clock;
6538 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6539 }
6540 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6541 mst_mgr,
6542 mst_port,
6543 dm_new_connector_state->pbn,
6544 dm_mst_get_pbn_divider(aconnector->dc_link));
6545 if (dm_new_connector_state->vcpi_slots < 0) {
6546 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6547 return dm_new_connector_state->vcpi_slots;
6548 }
6549 return 0;
6550 }
6551
6552 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6553 .disable = dm_encoder_helper_disable,
6554 .atomic_check = dm_encoder_helper_atomic_check
6555 };
6556
6557 #if defined(CONFIG_DRM_AMD_DC_DCN)
6558 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6559 struct dc_state *dc_state)
6560 {
6561 struct dc_stream_state *stream = NULL;
6562 struct drm_connector *connector;
6563 struct drm_connector_state *new_con_state, *old_con_state;
6564 struct amdgpu_dm_connector *aconnector;
6565 struct dm_connector_state *dm_conn_state;
6566 int i, j, clock, bpp;
6567 int vcpi, pbn_div, pbn = 0;
6568
6569 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6570
6571 aconnector = to_amdgpu_dm_connector(connector);
6572
6573 if (!aconnector->port)
6574 continue;
6575
6576 if (!new_con_state || !new_con_state->crtc)
6577 continue;
6578
6579 dm_conn_state = to_dm_connector_state(new_con_state);
6580
6581 for (j = 0; j < dc_state->stream_count; j++) {
6582 stream = dc_state->streams[j];
6583 if (!stream)
6584 continue;
6585
6586 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6587 break;
6588
6589 stream = NULL;
6590 }
6591
6592 if (!stream)
6593 continue;
6594
6595 if (stream->timing.flags.DSC != 1) {
6596 drm_dp_mst_atomic_enable_dsc(state,
6597 aconnector->port,
6598 dm_conn_state->pbn,
6599 0,
6600 false);
6601 continue;
6602 }
6603
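		/*
		 * pix_clk_100hz / 10 gives the pixel clock in kHz.
		 * dsc_cfg.bits_per_pixel is in units of 1/16 bpp, which
		 * drm_dp_calc_pbn_mode() expects when its dsc argument is true.
		 */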
6604 pbn_div = dm_mst_get_pbn_divider(stream->link);
6605 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6606 clock = stream->timing.pix_clk_100hz / 10;
6607 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6608 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6609 aconnector->port,
6610 pbn, pbn_div,
6611 true);
6612 if (vcpi < 0)
6613 return vcpi;
6614
6615 dm_conn_state->pbn = pbn;
6616 dm_conn_state->vcpi_slots = vcpi;
6617 }
6618 return 0;
6619 }
6620 #endif
6621
6622 static void dm_drm_plane_reset(struct drm_plane *plane)
6623 {
6624 struct dm_plane_state *amdgpu_state = NULL;
6625
6626 if (plane->state)
6627 plane->funcs->atomic_destroy_state(plane, plane->state);
6628
6629 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6630 WARN_ON(amdgpu_state == NULL);
6631
6632 if (amdgpu_state)
6633 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6634 }
6635
6636 static struct drm_plane_state *
6637 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6638 {
6639 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6640
6641 old_dm_plane_state = to_dm_plane_state(plane->state);
6642 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6643 if (!dm_plane_state)
6644 return NULL;
6645
6646 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6647
6648 if (old_dm_plane_state->dc_state) {
6649 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6650 dc_plane_state_retain(dm_plane_state->dc_state);
6651 }
6652
6653 return &dm_plane_state->base;
6654 }
6655
6656 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6657 struct drm_plane_state *state)
6658 {
6659 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6660
6661 if (dm_plane_state->dc_state)
6662 dc_plane_state_release(dm_plane_state->dc_state);
6663
6664 drm_atomic_helper_plane_destroy_state(plane, state);
6665 }
6666
6667 static const struct drm_plane_funcs dm_plane_funcs = {
6668 .update_plane = drm_atomic_helper_update_plane,
6669 .disable_plane = drm_atomic_helper_disable_plane,
6670 .destroy = drm_primary_helper_destroy,
6671 .reset = dm_drm_plane_reset,
6672 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6673 .atomic_destroy_state = dm_drm_plane_destroy_state,
6674 .format_mod_supported = dm_plane_format_mod_supported,
6675 };
6676
6677 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6678 struct drm_plane_state *new_state)
6679 {
6680 struct amdgpu_framebuffer *afb;
6681 struct drm_gem_object *obj;
6682 struct amdgpu_device *adev;
6683 struct amdgpu_bo *rbo;
6684 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6685 struct list_head list;
6686 struct ttm_validate_buffer tv;
6687 struct ww_acquire_ctx ticket;
6688 uint32_t domain;
6689 int r;
6690
6691 if (!new_state->fb) {
6692 DRM_DEBUG_KMS("No FB bound\n");
6693 return 0;
6694 }
6695
6696 afb = to_amdgpu_framebuffer(new_state->fb);
6697 obj = new_state->fb->obj[0];
6698 rbo = gem_to_amdgpu_bo(obj);
6699 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6700 INIT_LIST_HEAD(&list);
6701
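	/*
	 * Reserve the BO, pin it in a supported domain and make sure it has a
	 * GART mapping before taking its GPU address for scanout.
	 */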
6702 tv.bo = &rbo->tbo;
6703 tv.num_shared = 1;
6704 list_add(&tv.head, &list);
6705
6706 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6707 if (r) {
6708 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6709 return r;
6710 }
6711
6712 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6713 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6714 else
6715 domain = AMDGPU_GEM_DOMAIN_VRAM;
6716
6717 r = amdgpu_bo_pin(rbo, domain);
6718 if (unlikely(r != 0)) {
6719 if (r != -ERESTARTSYS)
6720 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6721 ttm_eu_backoff_reservation(&ticket, &list);
6722 return r;
6723 }
6724
6725 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6726 if (unlikely(r != 0)) {
6727 amdgpu_bo_unpin(rbo);
6728 ttm_eu_backoff_reservation(&ticket, &list);
6729 DRM_ERROR("%p bind failed\n", rbo);
6730 return r;
6731 }
6732
6733 ttm_eu_backoff_reservation(&ticket, &list);
6734
6735 afb->address = amdgpu_bo_gpu_offset(rbo);
6736
6737 amdgpu_bo_ref(rbo);
6738
6739 /**
6740 * We don't do surface updates on planes that have been newly created,
6741 * but we also don't have the afb->address during atomic check.
6742 *
6743 * Fill in buffer attributes depending on the address here, but only on
6744 * newly created planes since they're not being used by DC yet and this
6745 * won't modify global state.
6746 */
6747 dm_plane_state_old = to_dm_plane_state(plane->state);
6748 dm_plane_state_new = to_dm_plane_state(new_state);
6749
6750 if (dm_plane_state_new->dc_state &&
6751 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6752 struct dc_plane_state *plane_state =
6753 dm_plane_state_new->dc_state;
6754 bool force_disable_dcc = !plane_state->dcc.enable;
6755
6756 fill_plane_buffer_attributes(
6757 adev, afb, plane_state->format, plane_state->rotation,
6758 afb->tiling_flags,
6759 &plane_state->tiling_info, &plane_state->plane_size,
6760 &plane_state->dcc, &plane_state->address,
6761 afb->tmz_surface, force_disable_dcc);
6762 }
6763
6764 return 0;
6765 }
6766
6767 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6768 struct drm_plane_state *old_state)
6769 {
6770 struct amdgpu_bo *rbo;
6771 int r;
6772
6773 if (!old_state->fb)
6774 return;
6775
6776 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6777 r = amdgpu_bo_reserve(rbo, false);
6778 if (unlikely(r)) {
6779 DRM_ERROR("failed to reserve rbo before unpin\n");
6780 return;
6781 }
6782
6783 amdgpu_bo_unpin(rbo);
6784 amdgpu_bo_unreserve(rbo);
6785 amdgpu_bo_unref(&rbo);
6786 }
6787
6788 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6789 struct drm_crtc_state *new_crtc_state)
6790 {
6791 struct drm_framebuffer *fb = state->fb;
6792 int min_downscale, max_upscale;
6793 int min_scale = 0;
6794 int max_scale = INT_MAX;
6795
6796 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6797 if (fb && state->crtc) {
6798 /* Validate viewport to cover the case when only the position changes */
6799 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6800 int viewport_width = state->crtc_w;
6801 int viewport_height = state->crtc_h;
6802
6803 if (state->crtc_x < 0)
6804 viewport_width += state->crtc_x;
6805 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6806 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6807
6808 if (state->crtc_y < 0)
6809 viewport_height += state->crtc_y;
6810 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6811 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6812
6813 if (viewport_width < 0 || viewport_height < 0) {
6814 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6815 return -EINVAL;
6816 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6817 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6818 return -EINVAL;
6819 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6820 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6821 return -EINVAL;
6822 }
6823
6824 }
6825
6826 /* Get min/max allowed scaling factors from plane caps. */
6827 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6828 &min_downscale, &max_upscale);
6829 /*
6830 * Convert to drm convention: 16.16 fixed point, instead of dc's
6831 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6832 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6833 */
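		/*
		 * For example (hypothetical values): max_upscale == 16000, i.e. 16x
		 * in dc's 1.0 == 1000 convention, gives
		 * min_scale = (1000 << 16) / 16000 = 4096 = 1/16 in 16.16 fixed point.
		 */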
6834 min_scale = (1000 << 16) / max_upscale;
6835 max_scale = (1000 << 16) / min_downscale;
6836 }
6837
6838 return drm_atomic_helper_check_plane_state(
6839 state, new_crtc_state, min_scale, max_scale, true, true);
6840 }
6841
6842 static int dm_plane_atomic_check(struct drm_plane *plane,
6843 struct drm_atomic_state *state)
6844 {
6845 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6846 plane);
6847 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6848 struct dc *dc = adev->dm.dc;
6849 struct dm_plane_state *dm_plane_state;
6850 struct dc_scaling_info scaling_info;
6851 struct drm_crtc_state *new_crtc_state;
6852 int ret;
6853
6854 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6855
6856 dm_plane_state = to_dm_plane_state(new_plane_state);
6857
6858 if (!dm_plane_state->dc_state)
6859 return 0;
6860
6861 new_crtc_state =
6862 drm_atomic_get_new_crtc_state(state,
6863 new_plane_state->crtc);
6864 if (!new_crtc_state)
6865 return -EINVAL;
6866
6867 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6868 if (ret)
6869 return ret;
6870
6871 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6872 if (ret)
6873 return ret;
6874
6875 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6876 return 0;
6877
6878 return -EINVAL;
6879 }
6880
6881 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6882 struct drm_atomic_state *state)
6883 {
6884 /* Only support async updates on cursor planes. */
6885 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6886 return -EINVAL;
6887
6888 return 0;
6889 }
6890
6891 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6892 struct drm_atomic_state *state)
6893 {
6894 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6895 plane);
6896 struct drm_plane_state *old_state =
6897 drm_atomic_get_old_plane_state(state, plane);
6898
6899 trace_amdgpu_dm_atomic_update_cursor(new_state);
6900
6901 swap(plane->state->fb, new_state->fb);
6902
6903 plane->state->src_x = new_state->src_x;
6904 plane->state->src_y = new_state->src_y;
6905 plane->state->src_w = new_state->src_w;
6906 plane->state->src_h = new_state->src_h;
6907 plane->state->crtc_x = new_state->crtc_x;
6908 plane->state->crtc_y = new_state->crtc_y;
6909 plane->state->crtc_w = new_state->crtc_w;
6910 plane->state->crtc_h = new_state->crtc_h;
6911
6912 handle_cursor_update(plane, old_state);
6913 }
6914
6915 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6916 .prepare_fb = dm_plane_helper_prepare_fb,
6917 .cleanup_fb = dm_plane_helper_cleanup_fb,
6918 .atomic_check = dm_plane_atomic_check,
6919 .atomic_async_check = dm_plane_atomic_async_check,
6920 .atomic_async_update = dm_plane_atomic_async_update
6921 };
6922
6923 /*
6924 * TODO: these are currently initialized to rgb formats only.
6925 * For future use cases we should either initialize them dynamically based on
6926  * plane capabilities, or initialize this array to all formats, so the internal
6927  * drm check will succeed, and let DC implement the proper check.
6928 */
6929 static const uint32_t rgb_formats[] = {
6930 DRM_FORMAT_XRGB8888,
6931 DRM_FORMAT_ARGB8888,
6932 DRM_FORMAT_RGBA8888,
6933 DRM_FORMAT_XRGB2101010,
6934 DRM_FORMAT_XBGR2101010,
6935 DRM_FORMAT_ARGB2101010,
6936 DRM_FORMAT_ABGR2101010,
6937 DRM_FORMAT_XBGR8888,
6938 DRM_FORMAT_ABGR8888,
6939 DRM_FORMAT_RGB565,
6940 };
6941
6942 static const uint32_t overlay_formats[] = {
6943 DRM_FORMAT_XRGB8888,
6944 DRM_FORMAT_ARGB8888,
6945 DRM_FORMAT_RGBA8888,
6946 DRM_FORMAT_XBGR8888,
6947 DRM_FORMAT_ABGR8888,
6948 DRM_FORMAT_RGB565
6949 };
6950
6951 static const u32 cursor_formats[] = {
6952 DRM_FORMAT_ARGB8888
6953 };
6954
6955 static int get_plane_formats(const struct drm_plane *plane,
6956 const struct dc_plane_cap *plane_cap,
6957 uint32_t *formats, int max_formats)
6958 {
6959 int i, num_formats = 0;
6960
6961 /*
6962 * TODO: Query support for each group of formats directly from
6963 * DC plane caps. This will require adding more formats to the
6964 * caps list.
6965 */
6966
6967 switch (plane->type) {
6968 case DRM_PLANE_TYPE_PRIMARY:
6969 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6970 if (num_formats >= max_formats)
6971 break;
6972
6973 formats[num_formats++] = rgb_formats[i];
6974 }
6975
6976 if (plane_cap && plane_cap->pixel_format_support.nv12)
6977 formats[num_formats++] = DRM_FORMAT_NV12;
6978 if (plane_cap && plane_cap->pixel_format_support.p010)
6979 formats[num_formats++] = DRM_FORMAT_P010;
6980 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6981 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6982 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6983 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6984 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6985 }
6986 break;
6987
6988 case DRM_PLANE_TYPE_OVERLAY:
6989 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6990 if (num_formats >= max_formats)
6991 break;
6992
6993 formats[num_formats++] = overlay_formats[i];
6994 }
6995 break;
6996
6997 case DRM_PLANE_TYPE_CURSOR:
6998 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6999 if (num_formats >= max_formats)
7000 break;
7001
7002 formats[num_formats++] = cursor_formats[i];
7003 }
7004 break;
7005 }
7006
7007 return num_formats;
7008 }
7009
7010 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7011 struct drm_plane *plane,
7012 unsigned long possible_crtcs,
7013 const struct dc_plane_cap *plane_cap)
7014 {
7015 uint32_t formats[32];
7016 int num_formats;
7017 int res = -EPERM;
7018 unsigned int supported_rotations;
7019 uint64_t *modifiers = NULL;
7020
7021 num_formats = get_plane_formats(plane, plane_cap, formats,
7022 ARRAY_SIZE(formats));
7023
7024 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7025 if (res)
7026 return res;
7027
7028 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7029 &dm_plane_funcs, formats, num_formats,
7030 modifiers, plane->type, NULL);
7031 kfree(modifiers);
7032 if (res)
7033 return res;
7034
7035 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7036 plane_cap && plane_cap->per_pixel_alpha) {
7037 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7038 BIT(DRM_MODE_BLEND_PREMULTI);
7039
7040 drm_plane_create_alpha_property(plane);
7041 drm_plane_create_blend_mode_property(plane, blend_caps);
7042 }
7043
7044 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7045 plane_cap &&
7046 (plane_cap->pixel_format_support.nv12 ||
7047 plane_cap->pixel_format_support.p010)) {
7048 /* This only affects YUV formats. */
7049 drm_plane_create_color_properties(
7050 plane,
7051 BIT(DRM_COLOR_YCBCR_BT601) |
7052 BIT(DRM_COLOR_YCBCR_BT709) |
7053 BIT(DRM_COLOR_YCBCR_BT2020),
7054 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7055 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7056 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7057 }
7058
7059 supported_rotations =
7060 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7061 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7062
7063 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7064 plane->type != DRM_PLANE_TYPE_CURSOR)
7065 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7066 supported_rotations);
7067
7068 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7069
7070 /* Create (reset) the plane state */
7071 if (plane->funcs->reset)
7072 plane->funcs->reset(plane);
7073
7074 return 0;
7075 }
7076
7077 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7078 struct drm_plane *plane,
7079 uint32_t crtc_index)
7080 {
7081 struct amdgpu_crtc *acrtc = NULL;
7082 struct drm_plane *cursor_plane;
7083
7084 int res = -ENOMEM;
7085
7086 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7087 if (!cursor_plane)
7088 goto fail;
7089
7090 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7091 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7092
7093 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7094 if (!acrtc)
7095 goto fail;
7096
7097 res = drm_crtc_init_with_planes(
7098 dm->ddev,
7099 &acrtc->base,
7100 plane,
7101 cursor_plane,
7102 &amdgpu_dm_crtc_funcs, NULL);
7103
7104 if (res)
7105 goto fail;
7106
7107 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7108
7109 /* Create (reset) the crtc state */
7110 if (acrtc->base.funcs->reset)
7111 acrtc->base.funcs->reset(&acrtc->base);
7112
7113 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7114 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7115
7116 acrtc->crtc_id = crtc_index;
7117 acrtc->base.enabled = false;
7118 acrtc->otg_inst = -1;
7119
7120 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7121 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7122 true, MAX_COLOR_LUT_ENTRIES);
7123 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7124
7125 return 0;
7126
7127 fail:
7128 kfree(acrtc);
7129 kfree(cursor_plane);
7130 return res;
7131 }
7132
7133
7134 static int to_drm_connector_type(enum signal_type st)
7135 {
7136 switch (st) {
7137 case SIGNAL_TYPE_HDMI_TYPE_A:
7138 return DRM_MODE_CONNECTOR_HDMIA;
7139 case SIGNAL_TYPE_EDP:
7140 return DRM_MODE_CONNECTOR_eDP;
7141 case SIGNAL_TYPE_LVDS:
7142 return DRM_MODE_CONNECTOR_LVDS;
7143 case SIGNAL_TYPE_RGB:
7144 return DRM_MODE_CONNECTOR_VGA;
7145 case SIGNAL_TYPE_DISPLAY_PORT:
7146 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7147 return DRM_MODE_CONNECTOR_DisplayPort;
7148 case SIGNAL_TYPE_DVI_DUAL_LINK:
7149 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7150 return DRM_MODE_CONNECTOR_DVID;
7151 case SIGNAL_TYPE_VIRTUAL:
7152 return DRM_MODE_CONNECTOR_VIRTUAL;
7153
7154 default:
7155 return DRM_MODE_CONNECTOR_Unknown;
7156 }
7157 }
7158
7159 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7160 {
7161 struct drm_encoder *encoder;
7162
7163 /* There is only one encoder per connector */
7164 drm_connector_for_each_possible_encoder(connector, encoder)
7165 return encoder;
7166
7167 return NULL;
7168 }
7169
7170 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7171 {
7172 struct drm_encoder *encoder;
7173 struct amdgpu_encoder *amdgpu_encoder;
7174
7175 encoder = amdgpu_dm_connector_to_encoder(connector);
7176
7177 if (encoder == NULL)
7178 return;
7179
7180 amdgpu_encoder = to_amdgpu_encoder(encoder);
7181
7182 amdgpu_encoder->native_mode.clock = 0;
7183
7184 if (!list_empty(&connector->probed_modes)) {
7185 struct drm_display_mode *preferred_mode = NULL;
7186
7187 list_for_each_entry(preferred_mode,
7188 &connector->probed_modes,
7189 head) {
7190 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7191 amdgpu_encoder->native_mode = *preferred_mode;
7192
7193 break;
7194 }
7195
7196 }
7197 }
7198
7199 static struct drm_display_mode *
7200 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7201 char *name,
7202 int hdisplay, int vdisplay)
7203 {
7204 struct drm_device *dev = encoder->dev;
7205 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7206 struct drm_display_mode *mode = NULL;
7207 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7208
7209 mode = drm_mode_duplicate(dev, native_mode);
7210
7211 if (mode == NULL)
7212 return NULL;
7213
7214 mode->hdisplay = hdisplay;
7215 mode->vdisplay = vdisplay;
7216 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7217 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7218
7219 return mode;
7220
7221 }
7222
7223 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7224 struct drm_connector *connector)
7225 {
7226 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7227 struct drm_display_mode *mode = NULL;
7228 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7229 struct amdgpu_dm_connector *amdgpu_dm_connector =
7230 to_amdgpu_dm_connector(connector);
7231 int i;
7232 int n;
7233 struct mode_size {
7234 char name[DRM_DISPLAY_MODE_LEN];
7235 int w;
7236 int h;
7237 } common_modes[] = {
7238 { "640x480", 640, 480},
7239 { "800x600", 800, 600},
7240 { "1024x768", 1024, 768},
7241 { "1280x720", 1280, 720},
7242 { "1280x800", 1280, 800},
7243 {"1280x1024", 1280, 1024},
7244 { "1440x900", 1440, 900},
7245 {"1680x1050", 1680, 1050},
7246 {"1600x1200", 1600, 1200},
7247 {"1920x1080", 1920, 1080},
7248 {"1920x1200", 1920, 1200}
7249 };
7250
7251 n = ARRAY_SIZE(common_modes);
7252
7253 for (i = 0; i < n; i++) {
7254 struct drm_display_mode *curmode = NULL;
7255 bool mode_existed = false;
7256
7257 if (common_modes[i].w > native_mode->hdisplay ||
7258 common_modes[i].h > native_mode->vdisplay ||
7259 (common_modes[i].w == native_mode->hdisplay &&
7260 common_modes[i].h == native_mode->vdisplay))
7261 continue;
7262
7263 list_for_each_entry(curmode, &connector->probed_modes, head) {
7264 if (common_modes[i].w == curmode->hdisplay &&
7265 common_modes[i].h == curmode->vdisplay) {
7266 mode_existed = true;
7267 break;
7268 }
7269 }
7270
7271 if (mode_existed)
7272 continue;
7273
7274 mode = amdgpu_dm_create_common_mode(encoder,
7275 common_modes[i].name, common_modes[i].w,
7276 common_modes[i].h);
if (!mode)
continue;
7277 drm_mode_probed_add(connector, mode);
7278 amdgpu_dm_connector->num_modes++;
7279 }
7280 }
7281
7282 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7283 struct edid *edid)
7284 {
7285 struct amdgpu_dm_connector *amdgpu_dm_connector =
7286 to_amdgpu_dm_connector(connector);
7287
7288 if (edid) {
7289 /* empty probed_modes */
7290 INIT_LIST_HEAD(&connector->probed_modes);
7291 amdgpu_dm_connector->num_modes =
7292 drm_add_edid_modes(connector, edid);
7293
7294 /* Sort the probed modes before calling
7295 * amdgpu_dm_get_native_mode(), since an EDID can have
7296 * more than one preferred mode. Modes later in the
7297 * probed mode list could have a higher preferred
7298 * resolution; for example, 3840x2160 in the base EDID
7299 * preferred timing and 4096x2160 as the preferred
7300 * resolution in a DisplayID extension block.
7301 */
7302 drm_mode_sort(&connector->probed_modes);
7303 amdgpu_dm_get_native_mode(connector);
7304
7305 /* Freesync capabilities are reset by calling
7306 * drm_add_edid_modes() and need to be
7307 * restored here.
7308 */
7309 amdgpu_dm_update_freesync_caps(connector, edid);
7310 } else {
7311 amdgpu_dm_connector->num_modes = 0;
7312 }
7313 }
7314
7315 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7316 struct drm_display_mode *mode)
7317 {
7318 struct drm_display_mode *m;
7319
7320 list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7321 if (drm_mode_equal(m, mode))
7322 return true;
7323 }
7324
7325 return false;
7326 }
7327
7328 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7329 {
7330 const struct drm_display_mode *m;
7331 struct drm_display_mode *new_mode;
7332 uint i;
7333 uint32_t new_modes_count = 0;
7334
7335 /* Standard FPS values
7336 *
7337 * 23.976 - TV/NTSC
7338 * 24 - Cinema
7339 * 25 - TV/PAL
7340 * 29.97 - TV/NTSC
7341 * 30 - TV/NTSC
7342 * 48 - Cinema HFR
7343 * 50 - TV/PAL
7344 * 60 - Commonly used
7345 * 48,72,96 - Multiples of 24
7346 */
7347 const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7348 48000, 50000, 60000, 72000, 96000 };
7349
7350 /*
7351 * Find mode with highest refresh rate with the same resolution
7352 * as the preferred mode. Some monitors report a preferred mode
7353 * with lower resolution than the highest refresh rate supported.
7354 */
7355
7356 m = get_highest_refresh_rate_mode(aconnector, true);
7357 if (!m)
7358 return 0;
7359
7360 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7361 uint64_t target_vtotal, target_vtotal_diff;
7362 uint64_t num, den;
7363
7364 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7365 continue;
7366
7367 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7368 common_rates[i] > aconnector->max_vfreq * 1000)
7369 continue;
7370
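/*
 * m->clock is in kHz and common_rates[] is in mHz, so the vtotal
 * needed to hit the target rate at the same pixel clock and htotal is
 * vtotal = clock * 1000 * 1000 / (rate_mHz * htotal).
 */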
7371 num = (unsigned long long)m->clock * 1000 * 1000;
7372 den = common_rates[i] * (unsigned long long)m->htotal;
7373 target_vtotal = div_u64(num, den);
7374 target_vtotal_diff = target_vtotal - m->vtotal;
7375
7376 /* Check for illegal modes */
7377 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7378 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7379 m->vtotal + target_vtotal_diff < m->vsync_end)
7380 continue;
7381
7382 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7383 if (!new_mode)
7384 goto out;
7385
7386 new_mode->vtotal += (u16)target_vtotal_diff;
7387 new_mode->vsync_start += (u16)target_vtotal_diff;
7388 new_mode->vsync_end += (u16)target_vtotal_diff;
7389 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7390 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7391
7392 if (!is_duplicate_mode(aconnector, new_mode)) {
7393 drm_mode_probed_add(&aconnector->base, new_mode);
7394 new_modes_count += 1;
7395 } else
7396 drm_mode_destroy(aconnector->base.dev, new_mode);
7397 }
7398 out:
7399 return new_modes_count;
7400 }
7401
7402 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7403 struct edid *edid)
7404 {
7405 struct amdgpu_dm_connector *amdgpu_dm_connector =
7406 to_amdgpu_dm_connector(connector);
7407
7408 if (!(amdgpu_freesync_vid_mode && edid))
7409 return;
7410
7411 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7412 amdgpu_dm_connector->num_modes +=
7413 add_fs_modes(amdgpu_dm_connector);
7414 }
7415
7416 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7417 {
7418 struct amdgpu_dm_connector *amdgpu_dm_connector =
7419 to_amdgpu_dm_connector(connector);
7420 struct drm_encoder *encoder;
7421 struct edid *edid = amdgpu_dm_connector->edid;
7422
7423 encoder = amdgpu_dm_connector_to_encoder(connector);
7424
7425 if (!drm_edid_is_valid(edid)) {
7426 amdgpu_dm_connector->num_modes =
7427 drm_add_modes_noedid(connector, 640, 480);
7428 } else {
7429 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7430 amdgpu_dm_connector_add_common_modes(encoder, connector);
7431 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7432 }
7433 amdgpu_dm_fbc_init(connector);
7434
7435 return amdgpu_dm_connector->num_modes;
7436 }
7437
7438 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7439 struct amdgpu_dm_connector *aconnector,
7440 int connector_type,
7441 struct dc_link *link,
7442 int link_index)
7443 {
7444 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7445
7446 /*
7447 * Some of the properties below require access to state, like bpc.
7448 * Allocate some default initial connector state with our reset helper.
7449 */
7450 if (aconnector->base.funcs->reset)
7451 aconnector->base.funcs->reset(&aconnector->base);
7452
7453 aconnector->connector_id = link_index;
7454 aconnector->dc_link = link;
7455 aconnector->base.interlace_allowed = false;
7456 aconnector->base.doublescan_allowed = false;
7457 aconnector->base.stereo_allowed = false;
7458 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7459 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7460 aconnector->audio_inst = -1;
7461 mutex_init(&aconnector->hpd_lock);
7462
7463 /*
7464 * Configure HPD hot plug support. connector->polled defaults to 0,
7465 * which means HPD hot plug is not supported.
7466 */
7467 switch (connector_type) {
7468 case DRM_MODE_CONNECTOR_HDMIA:
7469 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7470 aconnector->base.ycbcr_420_allowed =
7471 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7472 break;
7473 case DRM_MODE_CONNECTOR_DisplayPort:
7474 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7475 aconnector->base.ycbcr_420_allowed =
7476 link->link_enc->features.dp_ycbcr420_supported ? true : false;
7477 break;
7478 case DRM_MODE_CONNECTOR_DVID:
7479 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7480 break;
7481 default:
7482 break;
7483 }
7484
7485 drm_object_attach_property(&aconnector->base.base,
7486 dm->ddev->mode_config.scaling_mode_property,
7487 DRM_MODE_SCALE_NONE);
7488
7489 drm_object_attach_property(&aconnector->base.base,
7490 adev->mode_info.underscan_property,
7491 UNDERSCAN_OFF);
7492 drm_object_attach_property(&aconnector->base.base,
7493 adev->mode_info.underscan_hborder_property,
7494 0);
7495 drm_object_attach_property(&aconnector->base.base,
7496 adev->mode_info.underscan_vborder_property,
7497 0);
7498
7499 if (!aconnector->mst_port)
7500 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7501
7502 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7503 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7504 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7505
7506 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7507 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7508 drm_object_attach_property(&aconnector->base.base,
7509 adev->mode_info.abm_level_property, 0);
7510 }
7511
7512 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7513 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7514 connector_type == DRM_MODE_CONNECTOR_eDP) {
7515 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7516
7517 if (!aconnector->mst_port)
7518 drm_connector_attach_vrr_capable_property(&aconnector->base);
7519
7520 #ifdef CONFIG_DRM_AMD_DC_HDCP
7521 if (adev->dm.hdcp_workqueue)
7522 drm_connector_attach_content_protection_property(&aconnector->base, true);
7523 #endif
7524 }
7525 }
7526
7527 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7528 struct i2c_msg *msgs, int num)
7529 {
7530 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7531 struct ddc_service *ddc_service = i2c->ddc_service;
7532 struct i2c_command cmd;
7533 int i;
7534 int result = -EIO;
7535
7536 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7537
7538 if (!cmd.payloads)
7539 return result;
7540
7541 cmd.number_of_payloads = num;
7542 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7543 cmd.speed = 100;
7544
7545 for (i = 0; i < num; i++) {
7546 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7547 cmd.payloads[i].address = msgs[i].addr;
7548 cmd.payloads[i].length = msgs[i].len;
7549 cmd.payloads[i].data = msgs[i].buf;
7550 }
7551
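/*
 * Per the i2c_algorithm contract, return the number of messages
 * processed on success and a negative errno (-EIO here) on failure.
 */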
7552 if (dc_submit_i2c(
7553 ddc_service->ctx->dc,
7554 ddc_service->ddc_pin->hw_info.ddc_channel,
7555 &cmd))
7556 result = num;
7557
7558 kfree(cmd.payloads);
7559 return result;
7560 }
7561
7562 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7563 {
7564 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7565 }
7566
7567 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7568 .master_xfer = amdgpu_dm_i2c_xfer,
7569 .functionality = amdgpu_dm_i2c_func,
7570 };
7571
7572 static struct amdgpu_i2c_adapter *
7573 create_i2c(struct ddc_service *ddc_service,
7574 int link_index,
7575 int *res)
7576 {
7577 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7578 struct amdgpu_i2c_adapter *i2c;
7579
7580 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7581 if (!i2c)
7582 return NULL;
7583 i2c->base.owner = THIS_MODULE;
7584 i2c->base.class = I2C_CLASS_DDC;
7585 i2c->base.dev.parent = &adev->pdev->dev;
7586 i2c->base.algo = &amdgpu_dm_i2c_algo;
7587 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7588 i2c_set_adapdata(&i2c->base, i2c);
7589 i2c->ddc_service = ddc_service;
7590 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7591
7592 return i2c;
7593 }
7594
7595
7596 /*
7597 * Note: this function assumes that dc_link_detect() was called for the
7598 * dc_link which will be represented by this aconnector.
7599 */
7600 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7601 struct amdgpu_dm_connector *aconnector,
7602 uint32_t link_index,
7603 struct amdgpu_encoder *aencoder)
7604 {
7605 int res = 0;
7606 int connector_type;
7607 struct dc *dc = dm->dc;
7608 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7609 struct amdgpu_i2c_adapter *i2c;
7610
7611 link->priv = aconnector;
7612
7613 DRM_DEBUG_DRIVER("%s()\n", __func__);
7614
7615 i2c = create_i2c(link->ddc, link->link_index, &res);
7616 if (!i2c) {
7617 DRM_ERROR("Failed to create i2c adapter data\n");
7618 return -ENOMEM;
7619 }
7620
7621 aconnector->i2c = i2c;
7622 res = i2c_add_adapter(&i2c->base);
7623
7624 if (res) {
7625 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7626 goto out_free;
7627 }
7628
7629 connector_type = to_drm_connector_type(link->connector_signal);
7630
7631 res = drm_connector_init_with_ddc(
7632 dm->ddev,
7633 &aconnector->base,
7634 &amdgpu_dm_connector_funcs,
7635 connector_type,
7636 &i2c->base);
7637
7638 if (res) {
7639 DRM_ERROR("connector_init failed\n");
7640 aconnector->connector_id = -1;
7641 goto out_free;
7642 }
7643
7644 drm_connector_helper_add(
7645 &aconnector->base,
7646 &amdgpu_dm_connector_helper_funcs);
7647
7648 amdgpu_dm_connector_init_helper(
7649 dm,
7650 aconnector,
7651 connector_type,
7652 link,
7653 link_index);
7654
7655 drm_connector_attach_encoder(
7656 &aconnector->base, &aencoder->base);
7657
7658 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7659 || connector_type == DRM_MODE_CONNECTOR_eDP)
7660 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7661
7662 out_free:
7663 if (res) {
7664 kfree(i2c);
7665 aconnector->i2c = NULL;
7666 }
7667 return res;
7668 }
7669
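/*
 * Returns a possible_crtcs bitmask with one bit set per enabled CRTC,
 * capped at six CRTCs, for use when initializing encoders.
 */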
7670 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7671 {
7672 switch (adev->mode_info.num_crtc) {
7673 case 1:
7674 return 0x1;
7675 case 2:
7676 return 0x3;
7677 case 3:
7678 return 0x7;
7679 case 4:
7680 return 0xf;
7681 case 5:
7682 return 0x1f;
7683 case 6:
7684 default:
7685 return 0x3f;
7686 }
7687 }
7688
7689 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7690 struct amdgpu_encoder *aencoder,
7691 uint32_t link_index)
7692 {
7693 struct amdgpu_device *adev = drm_to_adev(dev);
7694
7695 int res = drm_encoder_init(dev,
7696 &aencoder->base,
7697 &amdgpu_dm_encoder_funcs,
7698 DRM_MODE_ENCODER_TMDS,
7699 NULL);
7700
7701 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7702
7703 if (!res)
7704 aencoder->encoder_id = link_index;
7705 else
7706 aencoder->encoder_id = -1;
7707
7708 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7709
7710 return res;
7711 }
7712
7713 static void manage_dm_interrupts(struct amdgpu_device *adev,
7714 struct amdgpu_crtc *acrtc,
7715 bool enable)
7716 {
7717 /*
7718 * We have no guarantee that the frontend index maps to the same
7719 * backend index - some even map to more than one.
7720 *
7721 * TODO: Use a different interrupt or check DC itself for the mapping.
7722 */
7723 int irq_type =
7724 amdgpu_display_crtc_idx_to_irq_type(
7725 adev,
7726 acrtc->crtc_id);
7727
7728 if (enable) {
7729 drm_crtc_vblank_on(&acrtc->base);
7730 amdgpu_irq_get(
7731 adev,
7732 &adev->pageflip_irq,
7733 irq_type);
7734 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7735 amdgpu_irq_get(
7736 adev,
7737 &adev->vline0_irq,
7738 irq_type);
7739 #endif
7740 } else {
7741 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7742 amdgpu_irq_put(
7743 adev,
7744 &adev->vline0_irq,
7745 irq_type);
7746 #endif
7747 amdgpu_irq_put(
7748 adev,
7749 &adev->pageflip_irq,
7750 irq_type);
7751 drm_crtc_vblank_off(&acrtc->base);
7752 }
7753 }
7754
7755 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7756 struct amdgpu_crtc *acrtc)
7757 {
7758 int irq_type =
7759 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7760
7761 /*
7762 * Read the current state for the IRQ and forcibly reapply
7763 * the setting to hardware.
7764 */
7765 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7766 }
7767
7768 static bool
7769 is_scaling_state_different(const struct dm_connector_state *dm_state,
7770 const struct dm_connector_state *old_dm_state)
7771 {
7772 if (dm_state->scaling != old_dm_state->scaling)
7773 return true;
7774 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7775 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7776 return true;
7777 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7778 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7779 return true;
7780 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7781 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7782 return true;
7783 return false;
7784 }
7785
7786 #ifdef CONFIG_DRM_AMD_DC_HDCP
7787 static bool is_content_protection_different(struct drm_connector_state *state,
7788 const struct drm_connector_state *old_state,
7789 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7790 {
7791 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7792 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7793
7794 /* Handle: Type0/1 change */
7795 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7796 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7797 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7798 return true;
7799 }
7800
7801 /* CP is being re-enabled, ignore this
7802 *
7803 * Handles: ENABLED -> DESIRED
7804 */
7805 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7806 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7807 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7808 return false;
7809 }
7810
7811 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7812 *
7813 * Handles: UNDESIRED -> ENABLED
7814 */
7815 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7816 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7817 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7818
7819 /* Check that something is connected/enabled; otherwise we would start HDCP
7820 * with nothing connected/enabled (hot-plug, headless S3, DPMS).
7821 *
7822 * Handles: DESIRED -> DESIRED (Special case)
7823 */
7824 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7825 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7826 dm_con_state->update_hdcp = false;
7827 return true;
7828 }
7829
7830 /*
7831 * Handles: UNDESIRED -> UNDESIRED
7832 * DESIRED -> DESIRED
7833 * ENABLED -> ENABLED
7834 */
7835 if (old_state->content_protection == state->content_protection)
7836 return false;
7837
7838 /*
7839 * Handles: UNDESIRED -> DESIRED
7840 * DESIRED -> UNDESIRED
7841 * ENABLED -> UNDESIRED
7842 */
7843 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7844 return true;
7845
7846 /*
7847 * Handles: DESIRED -> ENABLED
7848 */
7849 return false;
7850 }
7851
7852 #endif
7853 static void remove_stream(struct amdgpu_device *adev,
7854 struct amdgpu_crtc *acrtc,
7855 struct dc_stream_state *stream)
7856 {
7857 /* this is the update mode case */
7858
7859 acrtc->otg_inst = -1;
7860 acrtc->enabled = false;
7861 }
7862
7863 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7864 struct dc_cursor_position *position)
7865 {
7866 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7867 int x, y;
7868 int xorigin = 0, yorigin = 0;
7869
7870 if (!crtc || !plane->state->fb)
7871 return 0;
7872
7873 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7874 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7875 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7876 __func__,
7877 plane->state->crtc_w,
7878 plane->state->crtc_h);
7879 return -EINVAL;
7880 }
7881
7882 x = plane->state->crtc_x;
7883 y = plane->state->crtc_y;
7884
7885 if (x <= -amdgpu_crtc->max_cursor_width ||
7886 y <= -amdgpu_crtc->max_cursor_height)
7887 return 0;
7888
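/*
 * For a cursor that is partially off the top/left edge, clamp the
 * position to 0 and move the hotspot so the visible part of the
 * cursor stays where userspace placed it.
 */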
7889 if (x < 0) {
7890 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7891 x = 0;
7892 }
7893 if (y < 0) {
7894 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7895 y = 0;
7896 }
7897 position->enable = true;
7898 position->translate_by_source = true;
7899 position->x = x;
7900 position->y = y;
7901 position->x_hotspot = xorigin;
7902 position->y_hotspot = yorigin;
7903
7904 return 0;
7905 }
7906
7907 static void handle_cursor_update(struct drm_plane *plane,
7908 struct drm_plane_state *old_plane_state)
7909 {
7910 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7911 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7912 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7913 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7914 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7915 uint64_t address = afb ? afb->address : 0;
7916 struct dc_cursor_position position = {0};
7917 struct dc_cursor_attributes attributes;
7918 int ret;
7919
7920 if (!plane->state->fb && !old_plane_state->fb)
7921 return;
7922
7923 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
7924 __func__,
7925 amdgpu_crtc->crtc_id,
7926 plane->state->crtc_w,
7927 plane->state->crtc_h);
7928
7929 ret = get_cursor_position(plane, crtc, &position);
7930 if (ret)
7931 return;
7932
7933 if (!position.enable) {
7934 /* turn off cursor */
7935 if (crtc_state && crtc_state->stream) {
7936 mutex_lock(&adev->dm.dc_lock);
7937 dc_stream_set_cursor_position(crtc_state->stream,
7938 &position);
7939 mutex_unlock(&adev->dm.dc_lock);
7940 }
7941 return;
7942 }
7943
7944 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7945 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7946
7947 memset(&attributes, 0, sizeof(attributes));
7948 attributes.address.high_part = upper_32_bits(address);
7949 attributes.address.low_part = lower_32_bits(address);
7950 attributes.width = plane->state->crtc_w;
7951 attributes.height = plane->state->crtc_h;
7952 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7953 attributes.rotation_angle = 0;
7954 attributes.attribute_flags.value = 0;
7955
7956 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7957
7958 if (crtc_state->stream) {
7959 mutex_lock(&adev->dm.dc_lock);
7960 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7961 &attributes))
7962 DRM_ERROR("DC failed to set cursor attributes\n");
7963
7964 if (!dc_stream_set_cursor_position(crtc_state->stream,
7965 &position))
7966 DRM_ERROR("DC failed to set cursor position\n");
7967 mutex_unlock(&adev->dm.dc_lock);
7968 }
7969 }
7970
7971 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7972 {
7973
7974 assert_spin_locked(&acrtc->base.dev->event_lock);
7975 WARN_ON(acrtc->event);
7976
7977 acrtc->event = acrtc->base.state->event;
7978
7979 /* Set the flip status */
7980 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7981
7982 /* Mark this event as consumed */
7983 acrtc->base.state->event = NULL;
7984
7985 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7986 acrtc->crtc_id);
7987 }
7988
7989 static void update_freesync_state_on_stream(
7990 struct amdgpu_display_manager *dm,
7991 struct dm_crtc_state *new_crtc_state,
7992 struct dc_stream_state *new_stream,
7993 struct dc_plane_state *surface,
7994 u32 flip_timestamp_in_us)
7995 {
7996 struct mod_vrr_params vrr_params;
7997 struct dc_info_packet vrr_infopacket = {0};
7998 struct amdgpu_device *adev = dm->adev;
7999 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8000 unsigned long flags;
8001 bool pack_sdp_v1_3 = false;
8002
8003 if (!new_stream)
8004 return;
8005
8006 /*
8007 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8008 * For now it's sufficient to just guard against these conditions.
8009 */
8010
8011 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8012 return;
8013
8014 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8015 vrr_params = acrtc->dm_irq_params.vrr_params;
8016
8017 if (surface) {
8018 mod_freesync_handle_preflip(
8019 dm->freesync_module,
8020 surface,
8021 new_stream,
8022 flip_timestamp_in_us,
8023 &vrr_params);
8024
8025 if (adev->family < AMDGPU_FAMILY_AI &&
8026 amdgpu_dm_vrr_active(new_crtc_state)) {
8027 mod_freesync_handle_v_update(dm->freesync_module,
8028 new_stream, &vrr_params);
8029
8030 /* Need to call this before the frame ends. */
8031 dc_stream_adjust_vmin_vmax(dm->dc,
8032 new_crtc_state->stream,
8033 &vrr_params.adjust);
8034 }
8035 }
8036
8037 mod_freesync_build_vrr_infopacket(
8038 dm->freesync_module,
8039 new_stream,
8040 &vrr_params,
8041 PACKET_TYPE_VRR,
8042 TRANSFER_FUNC_UNKNOWN,
8043 &vrr_infopacket,
8044 pack_sdp_v1_3);
8045
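/*
 * Flag whether the VRR timing adjustment or the VRR infopacket
 * changed, so later commit code knows to reprogram vmin/vmax and
 * resend the infopacket to the sink.
 */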
8046 new_crtc_state->freesync_timing_changed |=
8047 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8048 &vrr_params.adjust,
8049 sizeof(vrr_params.adjust)) != 0);
8050
8051 new_crtc_state->freesync_vrr_info_changed |=
8052 (memcmp(&new_crtc_state->vrr_infopacket,
8053 &vrr_infopacket,
8054 sizeof(vrr_infopacket)) != 0);
8055
8056 acrtc->dm_irq_params.vrr_params = vrr_params;
8057 new_crtc_state->vrr_infopacket = vrr_infopacket;
8058
8059 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8060 new_stream->vrr_infopacket = vrr_infopacket;
8061
8062 if (new_crtc_state->freesync_vrr_info_changed)
8063 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8064 new_crtc_state->base.crtc->base.id,
8065 (int)new_crtc_state->base.vrr_enabled,
8066 (int)vrr_params.state);
8067
8068 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8069 }
8070
8071 static void update_stream_irq_parameters(
8072 struct amdgpu_display_manager *dm,
8073 struct dm_crtc_state *new_crtc_state)
8074 {
8075 struct dc_stream_state *new_stream = new_crtc_state->stream;
8076 struct mod_vrr_params vrr_params;
8077 struct mod_freesync_config config = new_crtc_state->freesync_config;
8078 struct amdgpu_device *adev = dm->adev;
8079 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8080 unsigned long flags;
8081
8082 if (!new_stream)
8083 return;
8084
8085 /*
8086 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8087 * For now it's sufficient to just guard against these conditions.
8088 */
8089 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8090 return;
8091
8092 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8093 vrr_params = acrtc->dm_irq_params.vrr_params;
8094
8095 if (new_crtc_state->vrr_supported &&
8096 config.min_refresh_in_uhz &&
8097 config.max_refresh_in_uhz) {
8098 /*
8099 * if freesync compatible mode was set, config.state will be set
8100 * in atomic check
8101 */
8102 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8103 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8104 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8105 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8106 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8107 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8108 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8109 } else {
8110 config.state = new_crtc_state->base.vrr_enabled ?
8111 VRR_STATE_ACTIVE_VARIABLE :
8112 VRR_STATE_INACTIVE;
8113 }
8114 } else {
8115 config.state = VRR_STATE_UNSUPPORTED;
8116 }
8117
8118 mod_freesync_build_vrr_params(dm->freesync_module,
8119 new_stream,
8120 &config, &vrr_params);
8121
8122 new_crtc_state->freesync_timing_changed |=
8123 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8124 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8125
8126 new_crtc_state->freesync_config = config;
8127 /* Copy state for access from DM IRQ handler */
8128 acrtc->dm_irq_params.freesync_config = config;
8129 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8130 acrtc->dm_irq_params.vrr_params = vrr_params;
8131 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8132 }
8133
8134 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8135 struct dm_crtc_state *new_state)
8136 {
8137 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8138 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8139
8140 if (!old_vrr_active && new_vrr_active) {
8141 /* Transition VRR inactive -> active:
8142 * While VRR is active, we must not disable vblank irq, as a
8143 * re-enable after a disable would compute bogus vblank/pflip
8144 * timestamps if it happened inside the display front porch.
8145 *
8146 * We also need vupdate irq for the actual core vblank handling
8147 * at end of vblank.
8148 */
8149 dm_set_vupdate_irq(new_state->base.crtc, true);
8150 drm_crtc_vblank_get(new_state->base.crtc);
8151 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8152 __func__, new_state->base.crtc->base.id);
8153 } else if (old_vrr_active && !new_vrr_active) {
8154 /* Transition VRR active -> inactive:
8155 * Allow vblank irq disable again for fixed refresh rate.
8156 */
8157 dm_set_vupdate_irq(new_state->base.crtc, false);
8158 drm_crtc_vblank_put(new_state->base.crtc);
8159 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8160 __func__, new_state->base.crtc->base.id);
8161 }
8162 }
8163
8164 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8165 {
8166 struct drm_plane *plane;
8167 struct drm_plane_state *old_plane_state, *new_plane_state;
8168 int i;
8169
8170 /*
8171 * TODO: Make this per-stream so we don't issue redundant updates for
8172 * commits with multiple streams.
8173 */
8174 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
8175 new_plane_state, i)
8176 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8177 handle_cursor_update(plane, old_plane_state);
8178 }
8179
8180 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8181 struct dc_state *dc_state,
8182 struct drm_device *dev,
8183 struct amdgpu_display_manager *dm,
8184 struct drm_crtc *pcrtc,
8185 bool wait_for_vblank)
8186 {
8187 uint32_t i;
8188 uint64_t timestamp_ns;
8189 struct drm_plane *plane;
8190 struct drm_plane_state *old_plane_state, *new_plane_state;
8191 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8192 struct drm_crtc_state *new_pcrtc_state =
8193 drm_atomic_get_new_crtc_state(state, pcrtc);
8194 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8195 struct dm_crtc_state *dm_old_crtc_state =
8196 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8197 int planes_count = 0, vpos, hpos;
8198 long r;
8199 unsigned long flags;
8200 struct amdgpu_bo *abo;
8201 uint32_t target_vblank, last_flip_vblank;
8202 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8203 bool pflip_present = false;
8204 struct {
8205 struct dc_surface_update surface_updates[MAX_SURFACES];
8206 struct dc_plane_info plane_infos[MAX_SURFACES];
8207 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8208 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8209 struct dc_stream_update stream_update;
8210 } *bundle;
8211
8212 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8213
8214 if (!bundle) {
8215 dm_error("Failed to allocate update bundle\n");
8216 goto cleanup;
8217 }
8218
8219 /*
8220 * Disable the cursor first if we're disabling all the planes.
8221 * It'll remain on the screen after the planes are re-enabled
8222 * if we don't.
8223 */
8224 if (acrtc_state->active_planes == 0)
8225 amdgpu_dm_commit_cursors(state);
8226
8227 /* update planes when needed */
8228 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8229 struct drm_crtc *crtc = new_plane_state->crtc;
8230 struct drm_crtc_state *new_crtc_state;
8231 struct drm_framebuffer *fb = new_plane_state->fb;
8232 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8233 bool plane_needs_flip;
8234 struct dc_plane_state *dc_plane;
8235 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8236
8237 /* Cursor plane is handled after stream updates */
8238 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8239 continue;
8240
8241 if (!fb || !crtc || pcrtc != crtc)
8242 continue;
8243
8244 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8245 if (!new_crtc_state->active)
8246 continue;
8247
8248 dc_plane = dm_new_plane_state->dc_state;
8249
8250 bundle->surface_updates[planes_count].surface = dc_plane;
8251 if (new_pcrtc_state->color_mgmt_changed) {
8252 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8253 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8254 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8255 }
8256
8257 fill_dc_scaling_info(new_plane_state,
8258 &bundle->scaling_infos[planes_count]);
8259
8260 bundle->surface_updates[planes_count].scaling_info =
8261 &bundle->scaling_infos[planes_count];
8262
8263 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8264
8265 pflip_present = pflip_present || plane_needs_flip;
8266
8267 if (!plane_needs_flip) {
8268 planes_count += 1;
8269 continue;
8270 }
8271
8272 abo = gem_to_amdgpu_bo(fb->obj[0]);
8273
8274 /*
8275 * Wait for all fences on this FB. Use a bounded wait to avoid
8276 * a deadlock during GPU reset, when the fence will not signal
8277 * but we still hold the reservation lock for the BO.
8278 */
8279 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8280 false,
8281 msecs_to_jiffies(5000));
8282 if (unlikely(r <= 0))
8283 DRM_ERROR("Waiting for fences timed out!");
8284
8285 fill_dc_plane_info_and_addr(
8286 dm->adev, new_plane_state,
8287 afb->tiling_flags,
8288 &bundle->plane_infos[planes_count],
8289 &bundle->flip_addrs[planes_count].address,
8290 afb->tmz_surface, false);
8291
8292 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8293 new_plane_state->plane->index,
8294 bundle->plane_infos[planes_count].dcc.enable);
8295
8296 bundle->surface_updates[planes_count].plane_info =
8297 &bundle->plane_infos[planes_count];
8298
8299 /*
8300 * Only allow immediate flips for fast updates that don't
8301 * change FB pitch, DCC state, rotation or mirroring.
8302 */
8303 bundle->flip_addrs[planes_count].flip_immediate =
8304 crtc->state->async_flip &&
8305 acrtc_state->update_type == UPDATE_TYPE_FAST;
8306
8307 timestamp_ns = ktime_get_ns();
8308 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8309 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8310 bundle->surface_updates[planes_count].surface = dc_plane;
8311
8312 if (!bundle->surface_updates[planes_count].surface) {
8313 DRM_ERROR("No surface for CRTC: id=%d\n",
8314 acrtc_attach->crtc_id);
8315 continue;
8316 }
8317
8318 if (plane == pcrtc->primary)
8319 update_freesync_state_on_stream(
8320 dm,
8321 acrtc_state,
8322 acrtc_state->stream,
8323 dc_plane,
8324 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8325
8326 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8327 __func__,
8328 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8329 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8330
8331 planes_count += 1;
8332
8333 }
8334
8335 if (pflip_present) {
8336 if (!vrr_active) {
8337 /* Use old throttling in non-vrr fixed refresh rate mode
8338 * to keep flip scheduling based on target vblank counts
8339 * working in a backwards compatible way, e.g., for
8340 * clients using the GLX_OML_sync_control extension or
8341 * DRI3/Present extension with defined target_msc.
8342 */
8343 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8344 }
8345 else {
8346 /* For variable refresh rate mode only:
8347 * Get vblank of last completed flip to avoid > 1 vrr
8348 * flips per video frame by use of throttling, but allow
8349 * flip programming anywhere in the possibly large
8350 * variable vrr vblank interval for fine-grained flip
8351 * timing control and more opportunity to avoid stutter
8352 * on late submission of flips.
8353 */
8354 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8355 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8356 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8357 }
8358
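/*
 * wait_for_vblank is a bool, so target_vblank is either the vblank
 * count of the last flip (no throttling) or one vblank after it.
 */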
8359 target_vblank = last_flip_vblank + wait_for_vblank;
8360
8361 /*
8362 * Wait until we're out of the vertical blank period before the one
8363 * targeted by the flip
8364 */
8365 while ((acrtc_attach->enabled &&
8366 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8367 0, &vpos, &hpos, NULL,
8368 NULL, &pcrtc->hwmode)
8369 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8370 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8371 (int)(target_vblank -
8372 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8373 usleep_range(1000, 1100);
8374 }
8375
8376 /**
8377 * Prepare the flip event for the pageflip interrupt to handle.
8378 *
8379 * This only works in the case where we've already turned on the
8380 * appropriate hardware blocks (e.g. HUBP), so in the transition case
8381 * from 0 -> n planes we have to skip a hardware generated event
8382 * and rely on sending it from software.
8383 */
8384 if (acrtc_attach->base.state->event &&
8385 acrtc_state->active_planes > 0) {
8386 drm_crtc_vblank_get(pcrtc);
8387
8388 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8389
8390 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8391 prepare_flip_isr(acrtc_attach);
8392
8393 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8394 }
8395
8396 if (acrtc_state->stream) {
8397 if (acrtc_state->freesync_vrr_info_changed)
8398 bundle->stream_update.vrr_infopacket =
8399 &acrtc_state->stream->vrr_infopacket;
8400 }
8401 }
8402
8403 /* Update the planes if changed or disable if we don't have any. */
8404 if ((planes_count || acrtc_state->active_planes == 0) &&
8405 acrtc_state->stream) {
8406 bundle->stream_update.stream = acrtc_state->stream;
8407 if (new_pcrtc_state->mode_changed) {
8408 bundle->stream_update.src = acrtc_state->stream->src;
8409 bundle->stream_update.dst = acrtc_state->stream->dst;
8410 }
8411
8412 if (new_pcrtc_state->color_mgmt_changed) {
8413 /*
8414 * TODO: This isn't fully correct since we've actually
8415 * already modified the stream in place.
8416 */
8417 bundle->stream_update.gamut_remap =
8418 &acrtc_state->stream->gamut_remap_matrix;
8419 bundle->stream_update.output_csc_transform =
8420 &acrtc_state->stream->csc_color_matrix;
8421 bundle->stream_update.out_transfer_func =
8422 acrtc_state->stream->out_transfer_func;
8423 }
8424
8425 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8426 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8427 bundle->stream_update.abm_level = &acrtc_state->abm_level;
8428
8429 /*
8430 * If FreeSync state on the stream has changed then we need to
8431 * re-adjust the min/max bounds now that DC doesn't handle this
8432 * as part of commit.
8433 */
8434 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8435 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8436 dc_stream_adjust_vmin_vmax(
8437 dm->dc, acrtc_state->stream,
8438 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8439 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8440 }
8441 mutex_lock(&dm->dc_lock);
8442 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8443 acrtc_state->stream->link->psr_settings.psr_allow_active)
8444 amdgpu_dm_psr_disable(acrtc_state->stream);
8445
8446 dc_commit_updates_for_stream(dm->dc,
8447 bundle->surface_updates,
8448 planes_count,
8449 acrtc_state->stream,
8450 &bundle->stream_update,
8451 dc_state);
8452
8453 /**
8454 * Enable or disable the interrupts on the backend.
8455 *
8456 * Most pipes are put into power gating when unused.
8457 *
8458 * When a pipe is power gated we lose its interrupt
8459 * enablement state, even after power gating is disabled again.
8460 *
8461 * So we need to update the IRQ control state in hardware
8462 * whenever the pipe turns on (since it could be previously
8463 * power gated) or off (since some pipes can't be power gated
8464 * on some ASICs).
8465 */
8466 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8467 dm_update_pflip_irq_state(drm_to_adev(dev),
8468 acrtc_attach);
8469
8470 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8471 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8472 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8473 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8474 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8475 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8476 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8477 amdgpu_dm_psr_enable(acrtc_state->stream);
8478 }
8479
8480 mutex_unlock(&dm->dc_lock);
8481 }
8482
8483 /*
8484 * Update cursor state *after* programming all the planes.
8485 * This avoids redundant programming in the case where we're going
8486 * to be disabling a single plane - those pipes are being disabled.
8487 */
8488 if (acrtc_state->active_planes)
8489 amdgpu_dm_commit_cursors(state);
8490
8491 cleanup:
8492 kfree(bundle);
8493 }
8494
8495 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8496 struct drm_atomic_state *state)
8497 {
8498 struct amdgpu_device *adev = drm_to_adev(dev);
8499 struct amdgpu_dm_connector *aconnector;
8500 struct drm_connector *connector;
8501 struct drm_connector_state *old_con_state, *new_con_state;
8502 struct drm_crtc_state *new_crtc_state;
8503 struct dm_crtc_state *new_dm_crtc_state;
8504 const struct dc_stream_status *status;
8505 int i, inst;
8506
8507 /* Notify device removals. */
8508 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8509 if (old_con_state->crtc != new_con_state->crtc) {
8510 /* CRTC changes require notification. */
8511 goto notify;
8512 }
8513
8514 if (!new_con_state->crtc)
8515 continue;
8516
8517 new_crtc_state = drm_atomic_get_new_crtc_state(
8518 state, new_con_state->crtc);
8519
8520 if (!new_crtc_state)
8521 continue;
8522
8523 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8524 continue;
8525
8526 notify:
8527 aconnector = to_amdgpu_dm_connector(connector);
8528
8529 mutex_lock(&adev->dm.audio_lock);
8530 inst = aconnector->audio_inst;
8531 aconnector->audio_inst = -1;
8532 mutex_unlock(&adev->dm.audio_lock);
8533
8534 amdgpu_dm_audio_eld_notify(adev, inst);
8535 }
8536
8537 /* Notify audio device additions. */
8538 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8539 if (!new_con_state->crtc)
8540 continue;
8541
8542 new_crtc_state = drm_atomic_get_new_crtc_state(
8543 state, new_con_state->crtc);
8544
8545 if (!new_crtc_state)
8546 continue;
8547
8548 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8549 continue;
8550
8551 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8552 if (!new_dm_crtc_state->stream)
8553 continue;
8554
8555 status = dc_stream_get_status(new_dm_crtc_state->stream);
8556 if (!status)
8557 continue;
8558
8559 aconnector = to_amdgpu_dm_connector(connector);
8560
8561 mutex_lock(&adev->dm.audio_lock);
8562 inst = status->audio_inst;
8563 aconnector->audio_inst = inst;
8564 mutex_unlock(&adev->dm.audio_lock);
8565
8566 amdgpu_dm_audio_eld_notify(adev, inst);
8567 }
8568 }
8569
8570 /*
8571 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8572 * @crtc_state: the DRM CRTC state
8573 * @stream_state: the DC stream state.
8574 *
8575 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8576 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8577 */
8578 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8579 struct dc_stream_state *stream_state)
8580 {
8581 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8582 }
8583
8584 /**
8585 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8586 * @state: The atomic state to commit
8587 *
8588 * This will tell DC to commit the constructed DC state from atomic_check,
8589 * programming the hardware. Any failure here implies a hardware failure, since
8590 * atomic check should have filtered anything non-kosher.
8591 */
8592 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8593 {
8594 struct drm_device *dev = state->dev;
8595 struct amdgpu_device *adev = drm_to_adev(dev);
8596 struct amdgpu_display_manager *dm = &adev->dm;
8597 struct dm_atomic_state *dm_state;
8598 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8599 uint32_t i, j;
8600 struct drm_crtc *crtc;
8601 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8602 unsigned long flags;
8603 bool wait_for_vblank = true;
8604 struct drm_connector *connector;
8605 struct drm_connector_state *old_con_state, *new_con_state;
8606 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8607 int crtc_disable_count = 0;
8608 bool mode_set_reset_required = false;
8609
8610 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8611
8612 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8613
8614 dm_state = dm_atomic_get_new_state(state);
8615 if (dm_state && dm_state->context) {
8616 dc_state = dm_state->context;
8617 } else {
8618 /* No state changes, retain current state. */
8619 dc_state_temp = dc_create_state(dm->dc);
8620 ASSERT(dc_state_temp);
8621 dc_state = dc_state_temp;
8622 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8623 }
8624
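/*
 * Disable interrupts and drop the stream reference for every CRTC
 * that is being turned off or needs a full modeset, before the new
 * DC state is committed below.
 */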
8625 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8626 new_crtc_state, i) {
8627 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8628
8629 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8630
8631 if (old_crtc_state->active &&
8632 (!new_crtc_state->active ||
8633 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8634 manage_dm_interrupts(adev, acrtc, false);
8635 dc_stream_release(dm_old_crtc_state->stream);
8636 }
8637 }
8638
8639 drm_atomic_helper_calc_timestamping_constants(state);
8640
8641 /* update changed items */
8642 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8643 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8644
8645 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8646 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8647
8648 DRM_DEBUG_ATOMIC(
8649 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8650 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8651 "connectors_changed:%d\n",
8652 acrtc->crtc_id,
8653 new_crtc_state->enable,
8654 new_crtc_state->active,
8655 new_crtc_state->planes_changed,
8656 new_crtc_state->mode_changed,
8657 new_crtc_state->active_changed,
8658 new_crtc_state->connectors_changed);
8659
8660 /* Disable cursor if disabling crtc */
8661 if (old_crtc_state->active && !new_crtc_state->active) {
8662 struct dc_cursor_position position;
8663
8664 memset(&position, 0, sizeof(position));
8665 mutex_lock(&dm->dc_lock);
8666 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8667 mutex_unlock(&dm->dc_lock);
8668 }
8669
8670 /* Copy all transient state flags into dc state */
8671 if (dm_new_crtc_state->stream) {
8672 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8673 dm_new_crtc_state->stream);
8674 }
8675
8676 /* handles headless hotplug case, updating new_state and
8677 * aconnector as needed
8678 */
8679
8680 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8681
8682 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8683
8684 if (!dm_new_crtc_state->stream) {
8685 /*
8686 * This can happen because of issues with
8687 * userspace notification delivery: userspace
8688 * tries to set a mode on a display that is in
8689 * fact disconnected, so dc_sink is NULL on the
8690 * aconnector. We expect a mode reset to follow
8691 * shortly.
8692 *
8693 * This can also happen when an unplug occurs
8694 * during the resume sequence.
8695 *
8696 * In this case we want to pretend we still
8697 * have a sink to keep the pipe running so that
8698 * hw state stays consistent with the sw state.
8699 */
8700 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8701 __func__, acrtc->base.base.id);
8702 continue;
8703 }
8704
8705 if (dm_old_crtc_state->stream)
8706 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8707
8708 pm_runtime_get_noresume(dev->dev);
8709
8710 acrtc->enabled = true;
8711 acrtc->hw_mode = new_crtc_state->mode;
8712 crtc->hwmode = new_crtc_state->mode;
8713 mode_set_reset_required = true;
8714 } else if (modereset_required(new_crtc_state)) {
8715 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8716 /* i.e. reset mode */
8717 if (dm_old_crtc_state->stream)
8718 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8719
8720 mode_set_reset_required = true;
8721 }
8722 } /* for_each_crtc_in_state() */
8723
8724 if (dc_state) {
8725 /* If there is a mode set or reset, disable eDP PSR */
8726 if (mode_set_reset_required)
8727 amdgpu_dm_psr_disable_all(dm);
8728
8729 dm_enable_per_frame_crtc_master_sync(dc_state);
8730 mutex_lock(&dm->dc_lock);
8731 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8732 #if defined(CONFIG_DRM_AMD_DC_DCN)
8733 /* Allow idle optimization when vblank count is 0 for display off */
8734 if (dm->active_vblank_irq_count == 0)
8735 dc_allow_idle_optimizations(dm->dc, true);
8736 #endif
8737 mutex_unlock(&dm->dc_lock);
8738 }
8739
8740 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8741 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8742
8743 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8744
8745 if (dm_new_crtc_state->stream != NULL) {
8746 const struct dc_stream_status *status =
8747 dc_stream_get_status(dm_new_crtc_state->stream);
8748
8749 if (!status)
8750 status = dc_stream_get_status_from_state(dc_state,
8751 dm_new_crtc_state->stream);
8752 if (!status)
8753 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8754 else
8755 acrtc->otg_inst = status->primary_otg_inst;
8756 }
8757 }
8758 #ifdef CONFIG_DRM_AMD_DC_HDCP
8759 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8760 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8761 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8762 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8763
8764 new_crtc_state = NULL;
8765
8766 if (acrtc)
8767 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8768
8769 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8770
8771 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8772 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8773 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8774 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8775 dm_new_con_state->update_hdcp = true;
8776 continue;
8777 }
8778
8779 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8780 hdcp_update_display(
8781 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8782 new_con_state->hdcp_content_type,
8783 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8784 }
8785 #endif
8786
8787 /* Handle connector state changes */
8788 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8789 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8790 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8791 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8792 struct dc_surface_update dummy_updates[MAX_SURFACES];
8793 struct dc_stream_update stream_update;
8794 struct dc_info_packet hdr_packet;
8795 struct dc_stream_status *status = NULL;
8796 bool abm_changed, hdr_changed, scaling_changed;
8797
8798 memset(&dummy_updates, 0, sizeof(dummy_updates));
8799 memset(&stream_update, 0, sizeof(stream_update));
8800
8801 if (acrtc) {
8802 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8803 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8804 }
8805
8806 /* Skip any modesets/resets */
8807 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8808 continue;
8809
8810 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8811 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8812
8813 scaling_changed = is_scaling_state_different(dm_new_con_state,
8814 dm_old_con_state);
8815
8816 abm_changed = dm_new_crtc_state->abm_level !=
8817 dm_old_crtc_state->abm_level;
8818
8819 hdr_changed =
8820 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8821
8822 if (!scaling_changed && !abm_changed && !hdr_changed)
8823 continue;
8824
8825 stream_update.stream = dm_new_crtc_state->stream;
8826 if (scaling_changed) {
8827 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8828 dm_new_con_state, dm_new_crtc_state->stream);
8829
8830 stream_update.src = dm_new_crtc_state->stream->src;
8831 stream_update.dst = dm_new_crtc_state->stream->dst;
8832 }
8833
8834 if (abm_changed) {
8835 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8836
8837 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8838 }
8839
8840 if (hdr_changed) {
8841 fill_hdr_info_packet(new_con_state, &hdr_packet);
8842 stream_update.hdr_static_metadata = &hdr_packet;
8843 }
8844
8845 status = dc_stream_get_status(dm_new_crtc_state->stream);
8846 WARN_ON(!status);
8847 WARN_ON(!status->plane_count);
8848
8849 /*
8850 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8851 * Here we create an empty update on each plane.
8852 * To fix this, DC should permit updating only stream properties.
8853 */
8854 for (j = 0; j < status->plane_count; j++)
8855 dummy_updates[j].surface = status->plane_states[0];
8856
8857
8858 mutex_lock(&dm->dc_lock);
8859 dc_commit_updates_for_stream(dm->dc,
8860 dummy_updates,
8861 status->plane_count,
8862 dm_new_crtc_state->stream,
8863 &stream_update,
8864 dc_state);
8865 mutex_unlock(&dm->dc_lock);
8866 }
8867
8868 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8869 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8870 new_crtc_state, i) {
8871 if (old_crtc_state->active && !new_crtc_state->active)
8872 crtc_disable_count++;
8873
8874 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8875 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8876
8877 /* For freesync config update on crtc state and params for irq */
8878 update_stream_irq_parameters(dm, dm_new_crtc_state);
8879
8880 /* Handle vrr on->off / off->on transitions */
8881 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8882 dm_new_crtc_state);
8883 }
8884
8885 /**
8886 * Enable interrupts for CRTCs that are newly enabled or went through
8887 * a modeset. This is intentionally deferred until after the front-end
8888 * state has been modified so that the OTG is already on and the IRQ
8889 * handlers do not access stale or invalid state.
8890 */
8891 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8892 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8893 #ifdef CONFIG_DEBUG_FS
8894 bool configure_crc = false;
8895 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8896 #endif
8897 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8898
8899 if (new_crtc_state->active &&
8900 (!old_crtc_state->active ||
8901 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8902 dc_stream_retain(dm_new_crtc_state->stream);
8903 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8904 manage_dm_interrupts(adev, acrtc, true);
8905
8906 #ifdef CONFIG_DEBUG_FS
8907 /**
8908 * Frontend may have changed so reapply the CRC capture
8909 * settings for the stream.
8910 */
8911 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8912 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8913 cur_crc_src = acrtc->dm_irq_params.crc_src;
8914 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8915
8916 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8917 configure_crc = true;
8918 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8919 if (amdgpu_dm_crc_window_is_activated(crtc))
8920 configure_crc = false;
8921 #endif
8922 }
8923
8924 if (configure_crc)
8925 amdgpu_dm_crtc_configure_crc_source(
8926 crtc, dm_new_crtc_state, cur_crc_src);
8927 #endif
8928 }
8929 }
8930
8931 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8932 if (new_crtc_state->async_flip)
8933 wait_for_vblank = false;
8934
8935 /* Update planes when needed, per CRTC */
8936 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8937 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8938
8939 if (dm_new_crtc_state->stream)
8940 amdgpu_dm_commit_planes(state, dc_state, dev,
8941 dm, crtc, wait_for_vblank);
8942 }
8943
8944 /* Update audio instances for each connector. */
8945 amdgpu_dm_commit_audio(dev, state);
8946
8947 /*
8948 * send a vblank event for any CRTC event not handled in the flip path, and
8949 * mark the event consumed for drm_atomic_helper_commit_hw_done
8950 */
8951 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8952 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8953
8954 if (new_crtc_state->event)
8955 drm_send_event_locked(dev, &new_crtc_state->event->base);
8956
8957 new_crtc_state->event = NULL;
8958 }
8959 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8960
8961 /* Signal HW programming completion */
8962 drm_atomic_helper_commit_hw_done(state);
8963
8964 if (wait_for_vblank)
8965 drm_atomic_helper_wait_for_flip_done(dev, state);
8966
8967 drm_atomic_helper_cleanup_planes(dev, state);
8968
8969 /* return the stolen vga memory back to VRAM */
8970 if (!adev->mman.keep_stolen_vga_memory)
8971 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8972 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8973
8974 /*
8975 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8976 * so we can put the GPU into runtime suspend if we're not driving any
8977 * displays anymore
8978 */
8979 for (i = 0; i < crtc_disable_count; i++)
8980 pm_runtime_put_autosuspend(dev->dev);
8981 pm_runtime_mark_last_busy(dev->dev);
8982
8983 if (dc_state_temp)
8984 dc_release_state(dc_state_temp);
8985 }
8986
8987
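/*
 * Build and commit a minimal atomic state (the given connector, its CRTC and
 * the CRTC's primary plane) with mode_changed forced, in order to re-program
 * the previously applied display configuration without userspace involvement.
 */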
8988 static int dm_force_atomic_commit(struct drm_connector *connector)
8989 {
8990 int ret = 0;
8991 struct drm_device *ddev = connector->dev;
8992 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8993 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8994 struct drm_plane *plane = disconnected_acrtc->base.primary;
8995 struct drm_connector_state *conn_state;
8996 struct drm_crtc_state *crtc_state;
8997 struct drm_plane_state *plane_state;
8998
8999 if (!state)
9000 return -ENOMEM;
9001
9002 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9003
9004 /* Construct an atomic state to restore previous display setting */
9005
9006 /*
9007 * Attach connectors to drm_atomic_state
9008 */
9009 conn_state = drm_atomic_get_connector_state(state, connector);
9010
9011 ret = PTR_ERR_OR_ZERO(conn_state);
9012 if (ret)
9013 goto out;
9014
9015 /* Attach crtc to drm_atomic_state*/
9016 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9017
9018 ret = PTR_ERR_OR_ZERO(crtc_state);
9019 if (ret)
9020 goto out;
9021
9022 /* force a restore */
9023 crtc_state->mode_changed = true;
9024
9025 /* Attach plane to drm_atomic_state */
9026 plane_state = drm_atomic_get_plane_state(state, plane);
9027
9028 ret = PTR_ERR_OR_ZERO(plane_state);
9029 if (ret)
9030 goto out;
9031
9032 /* Call commit internally with the state we just constructed */
9033 ret = drm_atomic_commit(state);
9034
9035 out:
9036 drm_atomic_state_put(state);
9037 if (ret)
9038 DRM_ERROR("Restoring old state failed with %i\n", ret);
9039
9040 return ret;
9041 }
9042
9043 /*
9044 * This function handles all cases when set mode does not come upon hotplug.
9045 * This includes when a display is unplugged then plugged back into the
9046 * same port and when running without usermode desktop manager support.
9047 */
9048 void dm_restore_drm_connector_state(struct drm_device *dev,
9049 struct drm_connector *connector)
9050 {
9051 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9052 struct amdgpu_crtc *disconnected_acrtc;
9053 struct dm_crtc_state *acrtc_state;
9054
9055 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9056 return;
9057
9058 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9059 if (!disconnected_acrtc)
9060 return;
9061
9062 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9063 if (!acrtc_state->stream)
9064 return;
9065
9066 /*
9067 * If the previous sink is not released and different from the current,
9068 * we deduce we are in a state where we can not rely on usermode call
9069 * to turn on the display, so we do it here
9070 */
9071 if (acrtc_state->stream->sink != aconnector->dc_sink)
9072 dm_force_atomic_commit(&aconnector->base);
9073 }
9074
9075 /*
9076 * Grabs all modesetting locks to serialize against any blocking commits and
9077 * waits for completion of all non-blocking commits.
9078 */
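/*
 * Note on the waits below: wait_for_completion_interruptible_timeout()
 * returns 0 on timeout, a negative value if interrupted by a signal, and
 * otherwise the remaining jiffies (> 0) on completion, which is why a zero
 * result is reported as a timeout and a negative result is propagated.
 */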
9079 static int do_aquire_global_lock(struct drm_device *dev,
9080 struct drm_atomic_state *state)
9081 {
9082 struct drm_crtc *crtc;
9083 struct drm_crtc_commit *commit;
9084 long ret;
9085
9086 /*
9087 * Adding all modeset locks to the acquire_ctx ensures that when the
9088 * framework releases it, the extra locks we are taking here will get
9089 * released too.
9090 */
9091 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9092 if (ret)
9093 return ret;
9094
9095 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9096 spin_lock(&crtc->commit_lock);
9097 commit = list_first_entry_or_null(&crtc->commit_list,
9098 struct drm_crtc_commit, commit_entry);
9099 if (commit)
9100 drm_crtc_commit_get(commit);
9101 spin_unlock(&crtc->commit_lock);
9102
9103 if (!commit)
9104 continue;
9105
9106 /*
9107 * Make sure all pending HW programming completed and
9108 * page flips done
9109 */
9110 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9111
9112 if (ret > 0)
9113 ret = wait_for_completion_interruptible_timeout(
9114 &commit->flip_done, 10*HZ);
9115
9116 if (ret == 0)
9117 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9118 "timed out\n", crtc->base.id, crtc->name);
9119
9120 drm_crtc_commit_put(commit);
9121 }
9122
9123 return ret < 0 ? ret : 0;
9124 }
9125
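/*
 * Derive the FreeSync/VRR configuration for a CRTC from the connector's
 * capabilities. Refresh rates are programmed in micro-Hz: for example
 * (illustrative values only), a panel range of 48-144 Hz becomes
 * min_refresh_in_uhz = 48000000 and max_refresh_in_uhz = 144000000.
 */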
9126 static void get_freesync_config_for_crtc(
9127 struct dm_crtc_state *new_crtc_state,
9128 struct dm_connector_state *new_con_state)
9129 {
9130 struct mod_freesync_config config = {0};
9131 struct amdgpu_dm_connector *aconnector =
9132 to_amdgpu_dm_connector(new_con_state->base.connector);
9133 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9134 int vrefresh = drm_mode_vrefresh(mode);
9135 bool fs_vid_mode = false;
9136
9137 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9138 vrefresh >= aconnector->min_vfreq &&
9139 vrefresh <= aconnector->max_vfreq;
9140
9141 if (new_crtc_state->vrr_supported) {
9142 new_crtc_state->stream->ignore_msa_timing_param = true;
9143 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9144
9145 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9146 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9147 config.vsif_supported = true;
9148 config.btr = true;
9149
9150 if (fs_vid_mode) {
9151 config.state = VRR_STATE_ACTIVE_FIXED;
9152 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9153 goto out;
9154 } else if (new_crtc_state->base.vrr_enabled) {
9155 config.state = VRR_STATE_ACTIVE_VARIABLE;
9156 } else {
9157 config.state = VRR_STATE_INACTIVE;
9158 }
9159 }
9160 out:
9161 new_crtc_state->freesync_config = config;
9162 }
9163
9164 static void reset_freesync_config_for_crtc(
9165 struct dm_crtc_state *new_crtc_state)
9166 {
9167 new_crtc_state->vrr_supported = false;
9168
9169 memset(&new_crtc_state->vrr_infopacket, 0,
9170 sizeof(new_crtc_state->vrr_infopacket));
9171 }
9172
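/*
 * Returns true only when the two modes differ solely in their vertical
 * blanking placement: vtotal, vsync_start and vsync_end must change while
 * the vsync pulse width and every other timing field stay the same, i.e. a
 * front-porch-only change, which is what the FreeSync video mode switch
 * relies on.
 */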
9173 static bool
9174 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9175 struct drm_crtc_state *new_crtc_state)
9176 {
9177 struct drm_display_mode old_mode, new_mode;
9178
9179 if (!old_crtc_state || !new_crtc_state)
9180 return false;
9181
9182 old_mode = old_crtc_state->mode;
9183 new_mode = new_crtc_state->mode;
9184
9185 if (old_mode.clock == new_mode.clock &&
9186 old_mode.hdisplay == new_mode.hdisplay &&
9187 old_mode.vdisplay == new_mode.vdisplay &&
9188 old_mode.htotal == new_mode.htotal &&
9189 old_mode.vtotal != new_mode.vtotal &&
9190 old_mode.hsync_start == new_mode.hsync_start &&
9191 old_mode.vsync_start != new_mode.vsync_start &&
9192 old_mode.hsync_end == new_mode.hsync_end &&
9193 old_mode.vsync_end != new_mode.vsync_end &&
9194 old_mode.hskew == new_mode.hskew &&
9195 old_mode.vscan == new_mode.vscan &&
9196 (old_mode.vsync_end - old_mode.vsync_start) ==
9197 (new_mode.vsync_end - new_mode.vsync_start))
9198 return true;
9199
9200 return false;
9201 }
9202
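/*
 * Compute the fixed refresh rate, in micro-Hz, from the mode timing:
 * refresh_uhz = clock_khz * 1000 * 1000000 / (htotal * vtotal).
 * Illustrative example: for a CEA 1920x1080@60 mode with clock = 148500 kHz,
 * htotal = 2200 and vtotal = 1125, this gives
 * 148500000 * 1000000 / 2475000 = 60000000 uHz, i.e. exactly 60 Hz.
 */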
9203 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9204 uint64_t num, den, res;
9205 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9206
9207 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9208
9209 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9210 den = (unsigned long long)new_crtc_state->mode.htotal *
9211 (unsigned long long)new_crtc_state->mode.vtotal;
9212
9213 res = div_u64(num, den);
9214 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9215 }
9216
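/*
 * Add or remove the DC stream backing a CRTC in the in-flight DC context.
 * amdgpu_dm_atomic_check() calls this twice per CRTC: first with
 * enable == false to remove streams for disabled/changed CRTCs, then with
 * enable == true to (re)create streams for enabled CRTCs, mirroring how
 * planes are handled.
 */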
9217 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9218 struct drm_atomic_state *state,
9219 struct drm_crtc *crtc,
9220 struct drm_crtc_state *old_crtc_state,
9221 struct drm_crtc_state *new_crtc_state,
9222 bool enable,
9223 bool *lock_and_validation_needed)
9224 {
9225 struct dm_atomic_state *dm_state = NULL;
9226 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9227 struct dc_stream_state *new_stream;
9228 int ret = 0;
9229
9230 /*
9231 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9232 * update changed items
9233 */
9234 struct amdgpu_crtc *acrtc = NULL;
9235 struct amdgpu_dm_connector *aconnector = NULL;
9236 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9237 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9238
9239 new_stream = NULL;
9240
9241 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9242 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9243 acrtc = to_amdgpu_crtc(crtc);
9244 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9245
9246 /* TODO This hack should go away */
9247 if (aconnector && enable) {
9248 /* Make sure fake sink is created in plug-in scenario */
9249 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9250 &aconnector->base);
9251 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9252 &aconnector->base);
9253
9254 if (IS_ERR(drm_new_conn_state)) {
9255 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9256 goto fail;
9257 }
9258
9259 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9260 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9261
9262 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9263 goto skip_modeset;
9264
9265 new_stream = create_validate_stream_for_sink(aconnector,
9266 &new_crtc_state->mode,
9267 dm_new_conn_state,
9268 dm_old_crtc_state->stream);
9269
9270 /*
9271 * We can have no stream on ACTION_SET if a display
9272 * was disconnected during S3. In this case it is not an
9273 * error: the OS will be updated after detection and
9274 * will do the right thing on the next atomic commit.
9275 */
9276
9277 if (!new_stream) {
9278 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9279 __func__, acrtc->base.base.id);
9280 ret = -ENOMEM;
9281 goto fail;
9282 }
9283
9284 /*
9285 * TODO: Check VSDB bits to decide whether this should
9286 * be enabled or not.
9287 */
9288 new_stream->triggered_crtc_reset.enabled =
9289 dm->force_timing_sync;
9290
9291 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9292
9293 ret = fill_hdr_info_packet(drm_new_conn_state,
9294 &new_stream->hdr_static_metadata);
9295 if (ret)
9296 goto fail;
9297
9298 /*
9299 * If we already removed the old stream from the context
9300 * (and set the new stream to NULL) then we can't reuse
9301 * the old stream even if the stream and scaling are unchanged.
9302 * We'll hit the BUG_ON and black screen.
9303 *
9304 * TODO: Refactor this function to allow this check to work
9305 * in all conditions.
9306 */
9307 if (amdgpu_freesync_vid_mode &&
9308 dm_new_crtc_state->stream &&
9309 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9310 goto skip_modeset;
9311
9312 if (dm_new_crtc_state->stream &&
9313 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9314 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9315 new_crtc_state->mode_changed = false;
9316 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9317 new_crtc_state->mode_changed);
9318 }
9319 }
9320
9321 /* mode_changed flag may get updated above, need to check again */
9322 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9323 goto skip_modeset;
9324
9325 DRM_DEBUG_ATOMIC(
9326 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9327 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9328 "connectors_changed:%d\n",
9329 acrtc->crtc_id,
9330 new_crtc_state->enable,
9331 new_crtc_state->active,
9332 new_crtc_state->planes_changed,
9333 new_crtc_state->mode_changed,
9334 new_crtc_state->active_changed,
9335 new_crtc_state->connectors_changed);
9336
9337 /* Remove stream for any changed/disabled CRTC */
9338 if (!enable) {
9339
9340 if (!dm_old_crtc_state->stream)
9341 goto skip_modeset;
9342
9343 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9344 is_timing_unchanged_for_freesync(new_crtc_state,
9345 old_crtc_state)) {
9346 new_crtc_state->mode_changed = false;
9347 DRM_DEBUG_DRIVER(
9348 "Mode change not required for front porch change, "
9349 "setting mode_changed to %d",
9350 new_crtc_state->mode_changed);
9351
9352 set_freesync_fixed_config(dm_new_crtc_state);
9353
9354 goto skip_modeset;
9355 } else if (amdgpu_freesync_vid_mode && aconnector &&
9356 is_freesync_video_mode(&new_crtc_state->mode,
9357 aconnector)) {
9358 set_freesync_fixed_config(dm_new_crtc_state);
9359 }
9360
9361 ret = dm_atomic_get_state(state, &dm_state);
9362 if (ret)
9363 goto fail;
9364
9365 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9366 crtc->base.id);
9367
9368 /* i.e. reset mode */
9369 if (dc_remove_stream_from_ctx(
9370 dm->dc,
9371 dm_state->context,
9372 dm_old_crtc_state->stream) != DC_OK) {
9373 ret = -EINVAL;
9374 goto fail;
9375 }
9376
9377 dc_stream_release(dm_old_crtc_state->stream);
9378 dm_new_crtc_state->stream = NULL;
9379
9380 reset_freesync_config_for_crtc(dm_new_crtc_state);
9381
9382 *lock_and_validation_needed = true;
9383
9384 } else {/* Add stream for any updated/enabled CRTC */
9385 /*
9386 * Quick fix to prevent a NULL pointer dereference on new_stream when newly
9387 * added MST connectors are not found in the existing crtc_state in chained mode.
9388 * TODO: need to dig out the root cause of that.
9389 */
9390 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9391 goto skip_modeset;
9392
9393 if (modereset_required(new_crtc_state))
9394 goto skip_modeset;
9395
9396 if (modeset_required(new_crtc_state, new_stream,
9397 dm_old_crtc_state->stream)) {
9398
9399 WARN_ON(dm_new_crtc_state->stream);
9400
9401 ret = dm_atomic_get_state(state, &dm_state);
9402 if (ret)
9403 goto fail;
9404
9405 dm_new_crtc_state->stream = new_stream;
9406
9407 dc_stream_retain(new_stream);
9408
9409 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9410 crtc->base.id);
9411
9412 if (dc_add_stream_to_ctx(
9413 dm->dc,
9414 dm_state->context,
9415 dm_new_crtc_state->stream) != DC_OK) {
9416 ret = -EINVAL;
9417 goto fail;
9418 }
9419
9420 *lock_and_validation_needed = true;
9421 }
9422 }
9423
9424 skip_modeset:
9425 /* Release extra reference */
9426 if (new_stream)
9427 dc_stream_release(new_stream);
9428
9429 /*
9430 * We want to do dc stream updates that do not require a
9431 * full modeset below.
9432 */
9433 if (!(enable && aconnector && new_crtc_state->active))
9434 return 0;
9435 /*
9436 * Given above conditions, the dc state cannot be NULL because:
9437 * 1. We're in the process of enabling CRTCs (just been added
9438 * to the dc context, or already is on the context)
9439 * 2. Has a valid connector attached, and
9440 * 3. Is currently active and enabled.
9441 * => The dc stream state currently exists.
9442 */
9443 BUG_ON(dm_new_crtc_state->stream == NULL);
9444
9445 /* Scaling or underscan settings */
9446 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9447 update_stream_scaling_settings(
9448 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9449
9450 /* ABM settings */
9451 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9452
9453 /*
9454 * Color management settings. We also update color properties
9455 * when a modeset is needed, to ensure it gets reprogrammed.
9456 */
9457 if (dm_new_crtc_state->base.color_mgmt_changed ||
9458 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9459 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9460 if (ret)
9461 goto fail;
9462 }
9463
9464 /* Update Freesync settings. */
9465 get_freesync_config_for_crtc(dm_new_crtc_state,
9466 dm_new_conn_state);
9467
9468 return ret;
9469
9470 fail:
9471 if (new_stream)
9472 dc_stream_release(new_stream);
9473 return ret;
9474 }
9475
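/*
 * Decide whether a plane update forces the full remove/recreate path for all
 * planes on the stream. Anything that can change z-order, pipe assignment or
 * bandwidth (CRTC modeset or color management, plane scaling, rotation,
 * blending, alpha, colorspace, pixel format, tiling or DCC) currently
 * requires a reset; changes that only move a plane are not, by themselves,
 * in the list of checks below.
 */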
9476 static bool should_reset_plane(struct drm_atomic_state *state,
9477 struct drm_plane *plane,
9478 struct drm_plane_state *old_plane_state,
9479 struct drm_plane_state *new_plane_state)
9480 {
9481 struct drm_plane *other;
9482 struct drm_plane_state *old_other_state, *new_other_state;
9483 struct drm_crtc_state *new_crtc_state;
9484 int i;
9485
9486 /*
9487 * TODO: Remove this hack once the checks below are sufficient
9488 * to determine when we need to reset all the planes on
9489 * the stream.
9490 */
9491 if (state->allow_modeset)
9492 return true;
9493
9494 /* Exit early if we know that we're adding or removing the plane. */
9495 if (old_plane_state->crtc != new_plane_state->crtc)
9496 return true;
9497
9498 /* old crtc == new_crtc == NULL, plane not in context. */
9499 if (!new_plane_state->crtc)
9500 return false;
9501
9502 new_crtc_state =
9503 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9504
9505 if (!new_crtc_state)
9506 return true;
9507
9508 /* CRTC Degamma changes currently require us to recreate planes. */
9509 if (new_crtc_state->color_mgmt_changed)
9510 return true;
9511
9512 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9513 return true;
9514
9515 /*
9516 * If there are any new primary or overlay planes being added or
9517 * removed then the z-order can potentially change. To ensure
9518 * correct z-order and pipe acquisition the current DC architecture
9519 * requires us to remove and recreate all existing planes.
9520 *
9521 * TODO: Come up with a more elegant solution for this.
9522 */
9523 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9524 struct amdgpu_framebuffer *old_afb, *new_afb;
9525 if (other->type == DRM_PLANE_TYPE_CURSOR)
9526 continue;
9527
9528 if (old_other_state->crtc != new_plane_state->crtc &&
9529 new_other_state->crtc != new_plane_state->crtc)
9530 continue;
9531
9532 if (old_other_state->crtc != new_other_state->crtc)
9533 return true;
9534
9535 /* Src/dst size and scaling updates. */
9536 if (old_other_state->src_w != new_other_state->src_w ||
9537 old_other_state->src_h != new_other_state->src_h ||
9538 old_other_state->crtc_w != new_other_state->crtc_w ||
9539 old_other_state->crtc_h != new_other_state->crtc_h)
9540 return true;
9541
9542 /* Rotation / mirroring updates. */
9543 if (old_other_state->rotation != new_other_state->rotation)
9544 return true;
9545
9546 /* Blending updates. */
9547 if (old_other_state->pixel_blend_mode !=
9548 new_other_state->pixel_blend_mode)
9549 return true;
9550
9551 /* Alpha updates. */
9552 if (old_other_state->alpha != new_other_state->alpha)
9553 return true;
9554
9555 /* Colorspace changes. */
9556 if (old_other_state->color_range != new_other_state->color_range ||
9557 old_other_state->color_encoding != new_other_state->color_encoding)
9558 return true;
9559
9560 /* Framebuffer checks fall at the end. */
9561 if (!old_other_state->fb || !new_other_state->fb)
9562 continue;
9563
9564 /* Pixel format changes can require bandwidth updates. */
9565 if (old_other_state->fb->format != new_other_state->fb->format)
9566 return true;
9567
9568 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9569 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9570
9571 /* Tiling and DCC changes also require bandwidth updates. */
9572 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9573 old_afb->base.modifier != new_afb->base.modifier)
9574 return true;
9575 }
9576
9577 return false;
9578 }
9579
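/*
 * Validate a framebuffer bound to the cursor plane: it must fit within the
 * hardware cursor limits, be unscaled, have a pitch of exactly 64, 128 or
 * 256 pixels that matches its width, and use a linear layout when no
 * explicit modifier is supplied. Illustrative example (assuming a 4-byte
 * ARGB8888 format): a 64x64 cursor with pitches[0] = 256 bytes gives a pitch
 * of 256 / 4 = 64 pixels, which is accepted.
 */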
9580 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9581 struct drm_plane_state *new_plane_state,
9582 struct drm_framebuffer *fb)
9583 {
9584 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9585 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9586 unsigned int pitch;
9587 bool linear;
9588
9589 if (fb->width > new_acrtc->max_cursor_width ||
9590 fb->height > new_acrtc->max_cursor_height) {
9591 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9592 new_plane_state->fb->width,
9593 new_plane_state->fb->height);
9594 return -EINVAL;
9595 }
9596 if (new_plane_state->src_w != fb->width << 16 ||
9597 new_plane_state->src_h != fb->height << 16) {
9598 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9599 return -EINVAL;
9600 }
9601
9602 /* Pitch in pixels */
9603 pitch = fb->pitches[0] / fb->format->cpp[0];
9604
9605 if (fb->width != pitch) {
9606 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9607 fb->width, pitch);
9608 return -EINVAL;
9609 }
9610
9611 switch (pitch) {
9612 case 64:
9613 case 128:
9614 case 256:
9615 /* FB pitch is supported by cursor plane */
9616 break;
9617 default:
9618 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9619 return -EINVAL;
9620 }
9621
9622 /* Core DRM takes care of checking FB modifiers, so we only need to
9623 * check tiling flags when the FB doesn't have a modifier. */
9624 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9625 if (adev->family < AMDGPU_FAMILY_AI) {
9626 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9627 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9628 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9629 } else {
9630 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9631 }
9632 if (!linear) {
9633 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9634 return -EINVAL;
9635 }
9636 }
9637
9638 return 0;
9639 }
9640
9641 static int dm_update_plane_state(struct dc *dc,
9642 struct drm_atomic_state *state,
9643 struct drm_plane *plane,
9644 struct drm_plane_state *old_plane_state,
9645 struct drm_plane_state *new_plane_state,
9646 bool enable,
9647 bool *lock_and_validation_needed)
9648 {
9649
9650 struct dm_atomic_state *dm_state = NULL;
9651 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9652 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9653 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9654 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9655 struct amdgpu_crtc *new_acrtc;
9656 bool needs_reset;
9657 int ret = 0;
9658
9659
9660 new_plane_crtc = new_plane_state->crtc;
9661 old_plane_crtc = old_plane_state->crtc;
9662 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9663 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9664
9665 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9666 if (!enable || !new_plane_crtc ||
9667 drm_atomic_plane_disabling(plane->state, new_plane_state))
9668 return 0;
9669
9670 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9671
9672 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9673 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9674 return -EINVAL;
9675 }
9676
9677 if (new_plane_state->fb) {
9678 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9679 new_plane_state->fb);
9680 if (ret)
9681 return ret;
9682 }
9683
9684 return 0;
9685 }
9686
9687 needs_reset = should_reset_plane(state, plane, old_plane_state,
9688 new_plane_state);
9689
9690 /* Remove any changed/removed planes */
9691 if (!enable) {
9692 if (!needs_reset)
9693 return 0;
9694
9695 if (!old_plane_crtc)
9696 return 0;
9697
9698 old_crtc_state = drm_atomic_get_old_crtc_state(
9699 state, old_plane_crtc);
9700 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9701
9702 if (!dm_old_crtc_state->stream)
9703 return 0;
9704
9705 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9706 plane->base.id, old_plane_crtc->base.id);
9707
9708 ret = dm_atomic_get_state(state, &dm_state);
9709 if (ret)
9710 return ret;
9711
9712 if (!dc_remove_plane_from_context(
9713 dc,
9714 dm_old_crtc_state->stream,
9715 dm_old_plane_state->dc_state,
9716 dm_state->context)) {
9717
9718 return -EINVAL;
9719 }
9720
9721
9722 dc_plane_state_release(dm_old_plane_state->dc_state);
9723 dm_new_plane_state->dc_state = NULL;
9724
9725 *lock_and_validation_needed = true;
9726
9727 } else { /* Add new planes */
9728 struct dc_plane_state *dc_new_plane_state;
9729
9730 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9731 return 0;
9732
9733 if (!new_plane_crtc)
9734 return 0;
9735
9736 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9737 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9738
9739 if (!dm_new_crtc_state->stream)
9740 return 0;
9741
9742 if (!needs_reset)
9743 return 0;
9744
9745 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9746 if (ret)
9747 return ret;
9748
9749 WARN_ON(dm_new_plane_state->dc_state);
9750
9751 dc_new_plane_state = dc_create_plane_state(dc);
9752 if (!dc_new_plane_state)
9753 return -ENOMEM;
9754
9755 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9756 plane->base.id, new_plane_crtc->base.id);
9757
9758 ret = fill_dc_plane_attributes(
9759 drm_to_adev(new_plane_crtc->dev),
9760 dc_new_plane_state,
9761 new_plane_state,
9762 new_crtc_state);
9763 if (ret) {
9764 dc_plane_state_release(dc_new_plane_state);
9765 return ret;
9766 }
9767
9768 ret = dm_atomic_get_state(state, &dm_state);
9769 if (ret) {
9770 dc_plane_state_release(dc_new_plane_state);
9771 return ret;
9772 }
9773
9774 /*
9775 * Any atomic check errors that occur after this will
9776 * not need a release. The plane state will be attached
9777 * to the stream, and therefore part of the atomic
9778 * state. It'll be released when the atomic state is
9779 * cleaned.
9780 */
9781 if (!dc_add_plane_to_context(
9782 dc,
9783 dm_new_crtc_state->stream,
9784 dc_new_plane_state,
9785 dm_state->context)) {
9786
9787 dc_plane_state_release(dc_new_plane_state);
9788 return -EINVAL;
9789 }
9790
9791 dm_new_plane_state->dc_state = dc_new_plane_state;
9792
9793 /* Tell DC to do a full surface update every time there
9794 * is a plane change. Inefficient, but works for now.
9795 */
9796 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9797
9798 *lock_and_validation_needed = true;
9799 }
9800
9801
9802 return ret;
9803 }
9804
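/*
 * The cursor inherits its scaling from the underlying pipe, so reject
 * configurations where the cursor and primary plane scale differently. The
 * scale factors are compared in thousandths: e.g. (illustrative) a 64x64
 * cursor FB shown at 64x64 gives 64 * 1000 / 64 = 1000, while a 1920-wide
 * primary source scanned out at 3840 gives 3840 * 1000 / 1920 = 2000; the
 * mismatch makes the atomic check fail with -EINVAL.
 */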
9805 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9806 struct drm_crtc *crtc,
9807 struct drm_crtc_state *new_crtc_state)
9808 {
9809 struct drm_plane_state *new_cursor_state, *new_primary_state;
9810 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9811
9812 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9813 * cursor per pipe but it's going to inherit the scaling and
9814 * positioning from the underlying pipe. Check that the cursor plane's
9815 * scaling matches the primary plane's. */
9816
9817 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9818 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9819 if (!new_cursor_state || !new_primary_state ||
9820 !new_cursor_state->fb || !new_primary_state->fb) {
9821 return 0;
9822 }
9823
9824 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9825 (new_cursor_state->src_w >> 16);
9826 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9827 (new_cursor_state->src_h >> 16);
9828
9829 primary_scale_w = new_primary_state->crtc_w * 1000 /
9830 (new_primary_state->src_w >> 16);
9831 primary_scale_h = new_primary_state->crtc_h * 1000 /
9832 (new_primary_state->src_h >> 16);
9833
9834 if (cursor_scale_w != primary_scale_w ||
9835 cursor_scale_h != primary_scale_h) {
9836 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9837 return -EINVAL;
9838 }
9839
9840 return 0;
9841 }
9842
9843 #if defined(CONFIG_DRM_AMD_DC_DCN)
9844 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9845 {
9846 struct drm_connector *connector;
9847 struct drm_connector_state *conn_state;
9848 struct amdgpu_dm_connector *aconnector = NULL;
9849 int i;
9850 for_each_new_connector_in_state(state, connector, conn_state, i) {
9851 if (conn_state->crtc != crtc)
9852 continue;
9853
9854 aconnector = to_amdgpu_dm_connector(connector);
9855 if (!aconnector->port || !aconnector->mst_port)
9856 aconnector = NULL;
9857 else
9858 break;
9859 }
9860
9861 if (!aconnector)
9862 return 0;
9863
9864 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9865 }
9866 #endif
9867
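/*
 * Reject configurations where an enabled overlay plane does not fully cover
 * the primary plane (required when the hardware cursor is in use, per the
 * debug message below). Illustrative example: a 1920x1080 primary at (0,0)
 * under a full-screen 1920x1080 overlay passes; the same primary under a
 * 1600x900 overlay offset to (100,100) fails with -EINVAL.
 */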
9868 static int validate_overlay(struct drm_atomic_state *state)
9869 {
9870 int i;
9871 struct drm_plane *plane;
9872 struct drm_plane_state *old_plane_state, *new_plane_state;
9873 struct drm_plane_state *primary_state, *overlay_state = NULL;
9874
9875 /* Check if primary plane is contained inside overlay */
9876 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9877 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
9878 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9879 return 0;
9880
9881 overlay_state = new_plane_state;
9882 continue;
9883 }
9884 }
9885
9886 /* check if we're making changes to the overlay plane */
9887 if (!overlay_state)
9888 return 0;
9889
9890 /* check if overlay plane is enabled */
9891 if (!overlay_state->crtc)
9892 return 0;
9893
9894 /* find the primary plane for the CRTC that the overlay is enabled on */
9895 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
9896 if (IS_ERR(primary_state))
9897 return PTR_ERR(primary_state);
9898
9899 /* check if primary plane is enabled */
9900 if (!primary_state->crtc)
9901 return 0;
9902
9903 /* Perform the bounds check to ensure the overlay plane covers the primary */
9904 if (primary_state->crtc_x < overlay_state->crtc_x ||
9905 primary_state->crtc_y < overlay_state->crtc_y ||
9906 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
9907 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
9908 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
9909 return -EINVAL;
9910 }
9911
9912 return 0;
9913 }
9914
9915 /**
9916 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9917 * @dev: The DRM device
9918 * @state: The atomic state to commit
9919 *
9920 * Validate that the given atomic state is programmable by DC into hardware.
9921 * This involves constructing a &struct dc_state reflecting the new hardware
9922 * state we wish to commit, then querying DC to see if it is programmable. It's
9923 * important not to modify the existing DC state. Otherwise, atomic_check
9924 * may unexpectedly commit hardware changes.
9925 *
9926 * When validating the DC state, it's important that the right locks are
9927 * acquired. For full updates case which removes/adds/updates streams on one
9928 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9929 * that any such full update commit will wait for completion of any outstanding
9930 * flip using DRMs synchronization events.
9931 *
9932 * Note that DM adds the affected connectors for all CRTCs in state, when that
9933 * might not seem necessary. This is because DC stream creation requires the
9934 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9935 * be possible but non-trivial - a possible TODO item.
9936 *
9937 * Return: 0 on success, or a negative error code if validation failed.
9938 */
9939 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9940 struct drm_atomic_state *state)
9941 {
9942 struct amdgpu_device *adev = drm_to_adev(dev);
9943 struct dm_atomic_state *dm_state = NULL;
9944 struct dc *dc = adev->dm.dc;
9945 struct drm_connector *connector;
9946 struct drm_connector_state *old_con_state, *new_con_state;
9947 struct drm_crtc *crtc;
9948 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9949 struct drm_plane *plane;
9950 struct drm_plane_state *old_plane_state, *new_plane_state;
9951 enum dc_status status;
9952 int ret, i;
9953 bool lock_and_validation_needed = false;
9954 struct dm_crtc_state *dm_old_crtc_state;
9955
9956 trace_amdgpu_dm_atomic_check_begin(state);
9957
9958 ret = drm_atomic_helper_check_modeset(dev, state);
9959 if (ret)
9960 goto fail;
9961
9962 /* Check connector changes */
9963 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9964 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9965 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9966
9967 /* Skip connectors that are disabled or part of modeset already. */
9968 if (!old_con_state->crtc && !new_con_state->crtc)
9969 continue;
9970
9971 if (!new_con_state->crtc)
9972 continue;
9973
9974 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9975 if (IS_ERR(new_crtc_state)) {
9976 ret = PTR_ERR(new_crtc_state);
9977 goto fail;
9978 }
9979
9980 if (dm_old_con_state->abm_level !=
9981 dm_new_con_state->abm_level)
9982 new_crtc_state->connectors_changed = true;
9983 }
9984
9985 #if defined(CONFIG_DRM_AMD_DC_DCN)
9986 if (dc_resource_is_dsc_encoding_supported(dc)) {
9987 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9988 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9989 ret = add_affected_mst_dsc_crtcs(state, crtc);
9990 if (ret)
9991 goto fail;
9992 }
9993 }
9994 }
9995 #endif
9996 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9997 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9998
9999 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10000 !new_crtc_state->color_mgmt_changed &&
10001 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10002 dm_old_crtc_state->dsc_force_changed == false)
10003 continue;
10004
10005 if (!new_crtc_state->enable)
10006 continue;
10007
10008 ret = drm_atomic_add_affected_connectors(state, crtc);
10009 if (ret)
10010 return ret;
10011
10012 ret = drm_atomic_add_affected_planes(state, crtc);
10013 if (ret)
10014 goto fail;
10015
10016 if (dm_old_crtc_state->dsc_force_changed)
10017 new_crtc_state->mode_changed = true;
10018 }
10019
10020 /*
10021 * Add all primary and overlay planes on the CRTC to the state
10022 * whenever a plane is enabled to maintain correct z-ordering
10023 * and to enable fast surface updates.
10024 */
10025 drm_for_each_crtc(crtc, dev) {
10026 bool modified = false;
10027
10028 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10029 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10030 continue;
10031
10032 if (new_plane_state->crtc == crtc ||
10033 old_plane_state->crtc == crtc) {
10034 modified = true;
10035 break;
10036 }
10037 }
10038
10039 if (!modified)
10040 continue;
10041
10042 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10043 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10044 continue;
10045
10046 new_plane_state =
10047 drm_atomic_get_plane_state(state, plane);
10048
10049 if (IS_ERR(new_plane_state)) {
10050 ret = PTR_ERR(new_plane_state);
10051 goto fail;
10052 }
10053 }
10054 }
10055
10056 /* Remove existing planes if they are modified */
10057 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10058 ret = dm_update_plane_state(dc, state, plane,
10059 old_plane_state,
10060 new_plane_state,
10061 false,
10062 &lock_and_validation_needed);
10063 if (ret)
10064 goto fail;
10065 }
10066
10067 /* Disable all crtcs which require disable */
10068 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10069 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10070 old_crtc_state,
10071 new_crtc_state,
10072 false,
10073 &lock_and_validation_needed);
10074 if (ret)
10075 goto fail;
10076 }
10077
10078 /* Enable all crtcs which require enable */
10079 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10080 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10081 old_crtc_state,
10082 new_crtc_state,
10083 true,
10084 &lock_and_validation_needed);
10085 if (ret)
10086 goto fail;
10087 }
10088
10089 ret = validate_overlay(state);
10090 if (ret)
10091 goto fail;
10092
10093 /* Add new/modified planes */
10094 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10095 ret = dm_update_plane_state(dc, state, plane,
10096 old_plane_state,
10097 new_plane_state,
10098 true,
10099 &lock_and_validation_needed);
10100 if (ret)
10101 goto fail;
10102 }
10103
10104 /* Run this here since we want to validate the streams we created */
10105 ret = drm_atomic_helper_check_planes(dev, state);
10106 if (ret)
10107 goto fail;
10108
10109 /* Check cursor planes scaling */
10110 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10111 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10112 if (ret)
10113 goto fail;
10114 }
10115
10116 if (state->legacy_cursor_update) {
10117 /*
10118 * This is a fast cursor update coming from the plane update
10119 * helper, check if it can be done asynchronously for better
10120 * performance.
10121 */
10122 state->async_update =
10123 !drm_atomic_helper_async_check(dev, state);
10124
10125 /*
10126 * Skip the remaining global validation if this is an async
10127 * update. Cursor updates can be done without affecting
10128 * state or bandwidth calcs and this avoids the performance
10129 * penalty of locking the private state object and
10130 * allocating a new dc_state.
10131 */
10132 if (state->async_update)
10133 return 0;
10134 }
10135
10136 /* Check scaling and underscan changes */
10137 /* TODO Removed scaling changes validation due to inability to commit
10138 * new stream into context w\o causing full reset. Need to
10139 * decide how to handle.
10140 */
10141 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10142 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10143 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10144 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10145
10146 /* Skip any modesets/resets */
10147 if (!acrtc || drm_atomic_crtc_needs_modeset(
10148 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10149 continue;
10150
10151 /* Skip anything that is not a scaling or underscan change */
10152 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10153 continue;
10154
10155 lock_and_validation_needed = true;
10156 }
10157
10158 /**
10159 * Streams and planes are reset when there are changes that affect
10160 * bandwidth. Anything that affects bandwidth needs to go through
10161 * DC global validation to ensure that the configuration can be applied
10162 * to hardware.
10163 *
10164 * We have to currently stall out here in atomic_check for outstanding
10165 * commits to finish in this case because our IRQ handlers reference
10166 * DRM state directly - we can end up disabling interrupts too early
10167 * if we don't.
10168 *
10169 * TODO: Remove this stall and drop DM state private objects.
10170 */
10171 if (lock_and_validation_needed) {
10172 ret = dm_atomic_get_state(state, &dm_state);
10173 if (ret)
10174 goto fail;
10175
10176 ret = do_aquire_global_lock(dev, state);
10177 if (ret)
10178 goto fail;
10179
10180 #if defined(CONFIG_DRM_AMD_DC_DCN)
10181 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10182 goto fail;
10183
10184 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10185 if (ret)
10186 goto fail;
10187 #endif
10188
10189 /*
10190 * Perform validation of MST topology in the state:
10191 * We need to perform MST atomic check before calling
10192 * dc_validate_global_state(), or there is a chance
10193 * to get stuck in an infinite loop and hang eventually.
10194 */
10195 ret = drm_dp_mst_atomic_check(state);
10196 if (ret)
10197 goto fail;
10198 status = dc_validate_global_state(dc, dm_state->context, false);
10199 if (status != DC_OK) {
10200 DC_LOG_WARNING("DC global validation failure: %s (%d)",
10201 dc_status_to_str(status), status);
10202 ret = -EINVAL;
10203 goto fail;
10204 }
10205 } else {
10206 /*
10207 * The commit is a fast update. Fast updates shouldn't change
10208 * the DC context or affect global validation, and they can have their
10209 * commit work done in parallel with other commits not touching
10210 * the same resource. If we have a new DC context as part of
10211 * the DM atomic state from validation we need to free it and
10212 * retain the existing one instead.
10213 *
10214 * Furthermore, since the DM atomic state only contains the DC
10215 * context and can safely be annulled, we can free the state
10216 * and clear the associated private object now to free
10217 * some memory and avoid a possible use-after-free later.
10218 */
10219
10220 for (i = 0; i < state->num_private_objs; i++) {
10221 struct drm_private_obj *obj = state->private_objs[i].ptr;
10222
10223 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10224 int j = state->num_private_objs-1;
10225
10226 dm_atomic_destroy_state(obj,
10227 state->private_objs[i].state);
10228
10229 /* If i is not at the end of the array then the
10230 * last element needs to be moved to where i was
10231 * before the array can safely be truncated.
10232 */
10233 if (i != j)
10234 state->private_objs[i] =
10235 state->private_objs[j];
10236
10237 state->private_objs[j].ptr = NULL;
10238 state->private_objs[j].state = NULL;
10239 state->private_objs[j].old_state = NULL;
10240 state->private_objs[j].new_state = NULL;
10241
10242 state->num_private_objs = j;
10243 break;
10244 }
10245 }
10246 }
10247
10248 /* Store the overall update type for use later in atomic check. */
10249 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10250 struct dm_crtc_state *dm_new_crtc_state =
10251 to_dm_crtc_state(new_crtc_state);
10252
10253 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10254 UPDATE_TYPE_FULL :
10255 UPDATE_TYPE_FAST;
10256 }
10257
10258 /* Must be success */
10259 WARN_ON(ret);
10260
10261 trace_amdgpu_dm_atomic_check_finish(state, ret);
10262
10263 return ret;
10264
10265 fail:
10266 if (ret == -EDEADLK)
10267 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10268 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10269 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10270 else
10271 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10272
10273 trace_amdgpu_dm_atomic_check_finish(state, ret);
10274
10275 return ret;
10276 }
10277
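/*
 * Reads the DOWN_STREAM_PORT_COUNT DPCD register and checks the
 * MSA_TIMING_PAR_IGNORED bit, which (per the DisplayPort DPCD definition)
 * indicates the sink can ignore the MSA timing parameters - a prerequisite
 * for driving the panel with variable refresh timings.
 */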
10278 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10279 struct amdgpu_dm_connector *amdgpu_dm_connector)
10280 {
10281 uint8_t dpcd_data;
10282 bool capable = false;
10283
10284 if (amdgpu_dm_connector->dc_link &&
10285 dm_helpers_dp_read_dpcd(
10286 NULL,
10287 amdgpu_dm_connector->dc_link,
10288 DP_DOWN_STREAM_PORT_COUNT,
10289 &dpcd_data,
10290 sizeof(dpcd_data))) {
10291 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
10292 }
10293
10294 return capable;
10295 }
10296
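/*
 * Hand the CEA extension block to the DMCU firmware parser in 8-byte chunks,
 * waiting for an ack after each chunk; once the final chunk has been sent,
 * the firmware reports back the AMD vendor-specific data block (FreeSync
 * range) if one was found.
 */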
10297 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10298 uint8_t *edid_ext, int len,
10299 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10300 {
10301 int i;
10302 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10303 struct dc *dc = adev->dm.dc;
10304
10305 /* send extension block to DMCU for parsing */
10306 for (i = 0; i < len; i += 8) {
10307 bool res;
10308 int offset;
10309
10310 /* send 8 bytes at a time */
10311 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10312 return false;
10313
10314 if (i+8 == len) {
10315 /* EDID block send completed, expect result */
10316 int version, min_rate, max_rate;
10317
10318 res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10319 if (res) {
10320 /* amd vsdb found */
10321 vsdb_info->freesync_supported = 1;
10322 vsdb_info->amd_vsdb_version = version;
10323 vsdb_info->min_refresh_rate_hz = min_rate;
10324 vsdb_info->max_refresh_rate_hz = max_rate;
10325 return true;
10326 }
10327 /* not amd vsdb */
10328 return false;
10329 }
10330
10331 /* check for ack */
10332 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10333 if (!res)
10334 return false;
10335 }
10336
10337 return false;
10338 }
10339
10340 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10341 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10342 {
10343 uint8_t *edid_ext = NULL;
10344 int i;
10345 bool valid_vsdb_found = false;
10346
10347 /*----- drm_find_cea_extension() -----*/
10348 /* No EDID or EDID extensions */
10349 if (edid == NULL || edid->extensions == 0)
10350 return -ENODEV;
10351
10352 /* Find CEA extension */
10353 for (i = 0; i < edid->extensions; i++) {
10354 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10355 if (edid_ext[0] == CEA_EXT)
10356 break;
10357 }
10358
10359 if (i == edid->extensions)
10360 return -ENODEV;
10361
10362 /*----- cea_db_offsets() -----*/
10363 if (edid_ext[0] != CEA_EXT)
10364 return -ENODEV;
10365
10366 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10367
10368 return valid_vsdb_found ? i : -ENODEV;
10369 }
10370
10371 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10372 struct edid *edid)
10373 {
10374 int i = 0;
10375 struct detailed_timing *timing;
10376 struct detailed_non_pixel *data;
10377 struct detailed_data_monitor_range *range;
10378 struct amdgpu_dm_connector *amdgpu_dm_connector =
10379 to_amdgpu_dm_connector(connector);
10380 struct dm_connector_state *dm_con_state = NULL;
10381
10382 struct drm_device *dev = connector->dev;
10383 struct amdgpu_device *adev = drm_to_adev(dev);
10384 bool freesync_capable = false;
10385 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10386
10387 if (!connector->state) {
10388 DRM_ERROR("%s - Connector has no state", __func__);
10389 goto update;
10390 }
10391
10392 if (!edid) {
10393 dm_con_state = to_dm_connector_state(connector->state);
10394
10395 amdgpu_dm_connector->min_vfreq = 0;
10396 amdgpu_dm_connector->max_vfreq = 0;
10397 amdgpu_dm_connector->pixel_clock_mhz = 0;
10398
10399 goto update;
10400 }
10401
10402 dm_con_state = to_dm_connector_state(connector->state);
10403
10404 if (!amdgpu_dm_connector->dc_sink) {
10405 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10406 goto update;
10407 }
10408 if (!adev->dm.freesync_module)
10409 goto update;
10410
10411
10412 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10413 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10414 bool edid_check_required = false;
10415
10416 if (edid) {
10417 edid_check_required = is_dp_capable_without_timing_msa(
10418 adev->dm.dc,
10419 amdgpu_dm_connector);
10420 }
10421
10422 if (edid_check_required == true && (edid->version > 1 ||
10423 (edid->version == 1 && edid->revision > 1))) {
10424 for (i = 0; i < 4; i++) {
10425
10426 timing = &edid->detailed_timings[i];
10427 data = &timing->data.other_data;
10428 range = &data->data.range;
10429 /*
10430 * Check if monitor has continuous frequency mode
10431 */
10432 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10433 continue;
10434 /*
10435 * Check for flag range limits only. If flag == 1 then
10436 * no additional timing information is provided.
10437 * Default GTF, GTF secondary curve and CVT are not
10438 * supported.
10439 */
10440 if (range->flags != 1)
10441 continue;
10442
10443 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10444 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10445 amdgpu_dm_connector->pixel_clock_mhz =
10446 range->pixel_clock_mhz * 10;
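/*
 * The EDID range descriptor stores the maximum pixel clock in units of
 * 10 MHz, hence the * 10 above to get MHz.
 */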
10447
10448 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10449 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10450
10451 break;
10452 }
10453
10454 if (amdgpu_dm_connector->max_vfreq -
10455 amdgpu_dm_connector->min_vfreq > 10) {
10456
10457 freesync_capable = true;
10458 }
10459 }
10460 } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10461 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10462 if (i >= 0 && vsdb_info.freesync_supported) {
10463 timing = &edid->detailed_timings[i];
10464 data = &timing->data.other_data;
10465
10466 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10467 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10468 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10469 freesync_capable = true;
10470
10471 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10472 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10473 }
10474 }
10475
10476 update:
10477 if (dm_con_state)
10478 dm_con_state->freesync_capable = freesync_capable;
10479
10480 if (connector->vrr_capable_property)
10481 drm_connector_set_vrr_capable_property(connector,
10482 freesync_capable);
10483 }
10484
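/*
 * Cache the sink's PSR capability from the DP_PSR_SUPPORT DPCD register.
 * A value of 0 means PSR is not supported; any non-zero version is mapped
 * to DC_PSR_VERSION_1 here, since only PSR1 is programmed by this driver.
 */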
10485 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10486 {
10487 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10488
10489 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10490 return;
10491 if (link->type == dc_connection_none)
10492 return;
10493 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10494 dpcd_data, sizeof(dpcd_data))) {
10495 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10496
10497 if (dpcd_data[0] == 0) {
10498 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10499 link->psr_settings.psr_feature_enabled = false;
10500 } else {
10501 link->psr_settings.psr_version = DC_PSR_VERSION_1;
10502 link->psr_settings.psr_feature_enabled = true;
10503 }
10504
10505 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10506 }
10507 }
10508
10509 /*
10510 * amdgpu_dm_link_setup_psr() - configure psr link
10511 * @stream: stream state
10512 *
10513 * Return: true if success
10514 */
10515 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10516 {
10517 struct dc_link *link = NULL;
10518 struct psr_config psr_config = {0};
10519 struct psr_context psr_context = {0};
10520 bool ret = false;
10521
10522 if (stream == NULL)
10523 return false;
10524
10525 link = stream->link;
10526
10527 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10528
10529 if (psr_config.psr_version > 0) {
10530 psr_config.psr_exit_link_training_required = 0x1;
10531 psr_config.psr_frame_capture_indication_req = 0;
10532 psr_config.psr_rfb_setup_time = 0x37;
10533 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10534 psr_config.allow_smu_optimizations = 0x0;
10535
10536 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10537
10538 }
10539 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10540
10541 return ret;
10542 }
10543
10544 /*
10545 * amdgpu_dm_psr_enable() - enable psr f/w
10546 * @stream: stream state
10547 *
10548 * Return: true if success
10549 */
10550 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10551 {
10552 struct dc_link *link = stream->link;
10553 unsigned int vsync_rate_hz = 0;
10554 struct dc_static_screen_params params = {0};
10555 /* Calculate number of static frames before generating interrupt to
10556 * enter PSR.
10557 */
10558 /* Init fail-safe of 2 static frames */
10559 unsigned int num_frames_static = 2;
10560
10561 DRM_DEBUG_DRIVER("Enabling psr...\n");
10562
10563 vsync_rate_hz = div64_u64(div64_u64((
10564 stream->timing.pix_clk_100hz * 100),
10565 stream->timing.v_total),
10566 stream->timing.h_total);
10567
10568 /* Round up
10569 * Calculate number of frames such that at least 30 ms of time has
10570 * passed.
10571 */
10572 if (vsync_rate_hz != 0) {
10573 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10574 num_frames_static = (30000 / frame_time_microsec) + 1;
10575 }
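/*
 * Illustrative example: at 60 Hz, frame_time_microsec = 1000000 / 60
 * = 16666 us, so num_frames_static = 30000 / 16666 + 1 = 2 frames,
 * i.e. at least 30 ms of static screen before PSR entry is requested.
 */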
10576
10577 params.triggers.cursor_update = true;
10578 params.triggers.overlay_update = true;
10579 params.triggers.surface_update = true;
10580 params.num_frames = num_frames_static;
10581
10582 dc_stream_set_static_screen_params(link->ctx->dc,
10583 &stream, 1,
10584 &params);
10585
10586 return dc_link_set_psr_allow_active(link, true, false, false);
10587 }
10588
10589 /*
10590 * amdgpu_dm_psr_disable() - disable psr f/w
10591 * @stream: stream state
10592 *
10593 * Return: true if success
10594 */
10595 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10596 {
10597
10598 DRM_DEBUG_DRIVER("Disabling psr...\n");
10599
10600 return dc_link_set_psr_allow_active(stream->link, false, true, false);
10601 }
10602
10603 /*
10604 * amdgpu_dm_psr_disable_all() - disable psr f/w
10605 * on all streams, if psr is enabled on any stream
10606 *
10607 * Return: true if success
10608 */
10609 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10610 {
10611 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10612 return dc_set_psr_allow_active(dm->dc, false);
10613 }
10614
10615 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10616 {
10617 struct amdgpu_device *adev = drm_to_adev(dev);
10618 struct dc *dc = adev->dm.dc;
10619 int i;
10620
10621 mutex_lock(&adev->dm.dc_lock);
10622 if (dc->current_state) {
10623 for (i = 0; i < dc->current_state->stream_count; ++i)
10624 dc->current_state->streams[i]
10625 ->triggered_crtc_reset.enabled =
10626 adev->dm.force_timing_sync;
10627
10628 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10629 dc_trigger_sync(dc, dc->current_state);
10630 }
10631 mutex_unlock(&adev->dm.dc_lock);
10632 }
10633
10634 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10635 uint32_t value, const char *func_name)
10636 {
10637 #ifdef DM_CHECK_ADDR_0
10638 if (address == 0) {
10639 DC_ERR("invalid register write. address = 0");
10640 return;
10641 }
10642 #endif
10643 cgs_write_register(ctx->cgs_device, address, value);
10644 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10645 }
10646
10647 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10648 const char *func_name)
10649 {
10650 uint32_t value;
10651 #ifdef DM_CHECK_ADDR_0
10652 if (address == 0) {
10653 DC_ERR("invalid register read; address = 0\n");
10654 return 0;
10655 }
10656 #endif
10657
10658 if (ctx->dmub_srv &&
10659 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10660 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10661 ASSERT(false);
10662 return 0;
10663 }
10664
10665 value = cgs_read_register(ctx->cgs_device, address);
10666
10667 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10668
10669 return value;
10670 }