Commit | Line | Data |
---|---|---|
4562236b HW |
1 | /* |
2 | * Copyright 2015 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * Authors: AMD | |
23 | * | |
24 | */ | |
25 | ||
26 | #include "dm_services_types.h" | |
27 | #include "dc.h" | |
28 | ||
29 | #include "vid.h" | |
30 | #include "amdgpu.h" | |
a49dcb88 | 31 | #include "amdgpu_display.h" |
4562236b HW |
32 | #include "atom.h" |
33 | #include "amdgpu_dm.h" | |
e7b07cee | 34 | #include "amdgpu_pm.h" |
4562236b HW |
35 | |
36 | #include "amd_shared.h" | |
37 | #include "amdgpu_dm_irq.h" | |
38 | #include "dm_helpers.h" | |
e7b07cee HW |
39 | #include "dm_services_types.h" |
40 | #include "amdgpu_dm_mst_types.h" | |
4562236b HW |
41 | |
42 | #include "ivsrcid/ivsrcid_vislands30.h" | |
43 | ||
44 | #include <linux/module.h> | |
45 | #include <linux/moduleparam.h> | |
46 | #include <linux/version.h> | |
e7b07cee | 47 | #include <linux/types.h> |
4562236b | 48 | |
e7b07cee | 49 | #include <drm/drmP.h> |
4562236b HW |
50 | #include <drm/drm_atomic.h> |
51 | #include <drm/drm_atomic_helper.h> | |
52 | #include <drm/drm_dp_mst_helper.h> | |
e7b07cee HW |
53 | #include <drm/drm_fb_helper.h> |
54 | #include <drm/drm_edid.h> | |
4562236b HW |
55 | |
56 | #include "modules/inc/mod_freesync.h" | |
57 | ||
ff5ef992 AD |
58 | #if defined(CONFIG_DRM_AMD_DC_DCN1_0) |
59 | #include "ivsrcid/irqsrcs_dcn_1_0.h" | |
60 | ||
61 | #include "raven1/DCN/dcn_1_0_offset.h" | |
62 | #include "raven1/DCN/dcn_1_0_sh_mask.h" | |
63 | #include "vega10/soc15ip.h" | |
64 | ||
65 | #include "soc15_common.h" | |
66 | #endif | |
67 | ||
e7b07cee HW |
68 | #include "modules/inc/mod_freesync.h" |
69 | ||
70 | #include "i2caux_interface.h" | |
71 | ||
72 | ||
d4e13b0d AD |
73 | static enum drm_plane_type dm_surfaces_type_default[AMDGPU_MAX_PLANES] = { |
74 | DRM_PLANE_TYPE_PRIMARY, | |
75 | DRM_PLANE_TYPE_PRIMARY, | |
76 | DRM_PLANE_TYPE_PRIMARY, | |
77 | DRM_PLANE_TYPE_PRIMARY, | |
78 | DRM_PLANE_TYPE_PRIMARY, | |
79 | DRM_PLANE_TYPE_PRIMARY, | |
80 | }; | |
81 | ||
82 | static enum drm_plane_type dm_surfaces_type_carizzo[AMDGPU_MAX_PLANES] = { | |
83 | DRM_PLANE_TYPE_PRIMARY, | |
84 | DRM_PLANE_TYPE_PRIMARY, | |
85 | DRM_PLANE_TYPE_PRIMARY, | |
86 | DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */ | |
87 | }; | |
88 | ||
89 | static enum drm_plane_type dm_surfaces_type_stoney[AMDGPU_MAX_PLANES] = { | |
90 | DRM_PLANE_TYPE_PRIMARY, | |
91 | DRM_PLANE_TYPE_PRIMARY, | |
92 | DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */ | |
93 | }; | |
94 | ||
4562236b HW |
95 | /* |
96 | * dm_vblank_get_counter | |
97 | * | |
98 | * @brief | |
99 | * Get counter for number of vertical blanks | |
100 | * | |
101 | * @param | |
102 | * struct amdgpu_device *adev - [in] desired amdgpu device | |
103 | * int crtc - [in] which CRTC to get the counter from | |
104 | * | |
105 | * @return | |
106 | * Counter for vertical blanks | |
107 | */ | |
108 | static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc) | |
109 | { | |
110 | if (crtc >= adev->mode_info.num_crtc) | |
111 | return 0; | |
112 | else { | |
113 | struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; | |
da5c47f6 AG |
114 | struct dm_crtc_state *acrtc_state = to_dm_crtc_state( |
115 | acrtc->base.state); | |
4562236b | 116 | |
da5c47f6 AG |
117 | |
118 | if (acrtc_state->stream == NULL) { | |
ab2541b6 | 119 | DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc); |
4562236b HW |
120 | return 0; |
121 | } | |
122 | ||
da5c47f6 | 123 | return dc_stream_get_vblank_counter(acrtc_state->stream); |
4562236b HW |
124 | } |
125 | } | |
126 | ||
127 | static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, | |
128 | u32 *vbl, u32 *position) | |
129 | { | |
81c50963 ST |
130 | uint32_t v_blank_start, v_blank_end, h_position, v_position; |
131 | ||
4562236b HW |
132 | if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) |
133 | return -EINVAL; | |
134 | else { | |
135 | struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; | |
da5c47f6 AG |
136 | struct dm_crtc_state *acrtc_state = to_dm_crtc_state( |
137 | acrtc->base.state); | |
4562236b | 138 | |
da5c47f6 | 139 | if (acrtc_state->stream == NULL) { |
ab2541b6 | 140 | DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc); |
4562236b HW |
141 | return 0; |
142 | } | |
143 | ||
81c50963 ST |
144 | /* |
145 | * TODO rework base driver to use values directly. | |
146 | * For now, parse it back into reg-format. | |
147 | */ | |
da5c47f6 | 148 | dc_stream_get_scanoutpos(acrtc_state->stream, |
81c50963 ST |
149 | &v_blank_start, |
150 | &v_blank_end, | |
151 | &h_position, | |
152 | &v_position); | |
153 | ||
e806208d AG |
154 | *position = v_position | (h_position << 16); |
155 | *vbl = v_blank_start | (v_blank_end << 16); | |
4562236b HW |
156 | } |
157 | ||
158 | return 0; | |
159 | } | |
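The TODO above notes that the positions are still handed back in a packed, register-style encoding. A minimal standalone sketch (illustrative only, not driver code; the helper name is hypothetical) of how such packed words can be unpacked:

    #include <stdint.h>
    #include <stdio.h>

    /* Unpack the register-style words produced by dm_crtc_get_scanoutpos():
     * low 16 bits of vbl = v_blank_start, high 16 bits = v_blank_end;
     * low 16 bits of position = v_position, high 16 bits = h_position. */
    static void unpack_scanoutpos(uint32_t vbl, uint32_t position)
    {
    	uint32_t v_blank_start = vbl & 0xffff;
    	uint32_t v_blank_end   = vbl >> 16;
    	uint32_t v_position    = position & 0xffff;
    	uint32_t h_position    = position >> 16;

    	printf("vblank lines %u..%u, scanout at v=%u h=%u\n",
    	       v_blank_start, v_blank_end, v_position, h_position);
    }

    int main(void)
    {
    	/* example values packed the same way as above */
    	uint32_t position = 100u | (640u << 16);
    	uint32_t vbl = 1080u | (1125u << 16);

    	unpack_scanoutpos(vbl, position);
    	return 0;
    }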
160 | ||
161 | static bool dm_is_idle(void *handle) | |
162 | { | |
163 | /* XXX todo */ | |
164 | return true; | |
165 | } | |
166 | ||
167 | static int dm_wait_for_idle(void *handle) | |
168 | { | |
169 | /* XXX todo */ | |
170 | return 0; | |
171 | } | |
172 | ||
173 | static bool dm_check_soft_reset(void *handle) | |
174 | { | |
175 | return false; | |
176 | } | |
177 | ||
178 | static int dm_soft_reset(void *handle) | |
179 | { | |
180 | /* XXX todo */ | |
181 | return 0; | |
182 | } | |
183 | ||
184 | static struct amdgpu_crtc *get_crtc_by_otg_inst( | |
185 | struct amdgpu_device *adev, | |
186 | int otg_inst) | |
187 | { | |
188 | struct drm_device *dev = adev->ddev; | |
189 | struct drm_crtc *crtc; | |
190 | struct amdgpu_crtc *amdgpu_crtc; | |
191 | ||
192 | /* | |
193 | * The following check is inherited from both functions where this one is | |
194 | * now used. It still needs to be determined why this can happen. | |
195 | */ | |
196 | if (otg_inst == -1) { | |
197 | WARN_ON(1); | |
198 | return adev->mode_info.crtcs[0]; | |
199 | } | |
200 | ||
201 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | |
202 | amdgpu_crtc = to_amdgpu_crtc(crtc); | |
203 | ||
204 | if (amdgpu_crtc->otg_inst == otg_inst) | |
205 | return amdgpu_crtc; | |
206 | } | |
207 | ||
208 | return NULL; | |
209 | } | |
210 | ||
211 | static void dm_pflip_high_irq(void *interrupt_params) | |
212 | { | |
4562236b HW |
213 | struct amdgpu_crtc *amdgpu_crtc; |
214 | struct common_irq_params *irq_params = interrupt_params; | |
215 | struct amdgpu_device *adev = irq_params->adev; | |
216 | unsigned long flags; | |
217 | ||
218 | amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP); | |
219 | ||
220 | /* IRQ could occur when in initial stage */ | |
221 | /* TODO: work and BO cleanup */ | |
222 | if (amdgpu_crtc == NULL) { | |
223 | DRM_DEBUG_DRIVER("CRTC is null, returning.\n"); | |
224 | return; | |
225 | } | |
226 | ||
227 | spin_lock_irqsave(&adev->ddev->event_lock, flags); | |
4562236b HW |
228 | |
229 | if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ | |
230 | DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n", | |
231 | amdgpu_crtc->pflip_status, | |
232 | AMDGPU_FLIP_SUBMITTED, | |
233 | amdgpu_crtc->crtc_id, | |
234 | amdgpu_crtc); | |
235 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); | |
236 | return; | |
237 | } | |
238 | ||
4562236b HW |
239 | |
240 | /* wake up userspace */ | |
1159898a | 241 | if (amdgpu_crtc->event) { |
753c66c9 MK |
242 | /* Update to correct count/ts if racing with vblank irq */ |
243 | drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); | |
244 | ||
54f5499a | 245 | drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event); |
1159898a | 246 | |
54f5499a AG |
247 | /* page flip completed. clean up */ |
248 | amdgpu_crtc->event = NULL; | |
1159898a | 249 | |
54f5499a AG |
250 | } else |
251 | WARN_ON(1); | |
4562236b | 252 | |
54f5499a | 253 | amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; |
4562236b HW |
254 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); |
255 | ||
54f5499a AG |
256 | DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n", |
257 | __func__, amdgpu_crtc->crtc_id, amdgpu_crtc); | |
4562236b HW |
258 | |
259 | drm_crtc_vblank_put(&amdgpu_crtc->base); | |
4562236b HW |
260 | } |
261 | ||
262 | static void dm_crtc_high_irq(void *interrupt_params) | |
263 | { | |
264 | struct common_irq_params *irq_params = interrupt_params; | |
265 | struct amdgpu_device *adev = irq_params->adev; | |
266 | uint8_t crtc_index = 0; | |
267 | struct amdgpu_crtc *acrtc; | |
268 | ||
b57de80a | 269 | acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); |
4562236b HW |
270 | |
271 | if (acrtc) | |
272 | crtc_index = acrtc->crtc_id; | |
273 | ||
274 | drm_handle_vblank(adev->ddev, crtc_index); | |
275 | } | |
276 | ||
277 | static int dm_set_clockgating_state(void *handle, | |
278 | enum amd_clockgating_state state) | |
279 | { | |
280 | return 0; | |
281 | } | |
282 | ||
283 | static int dm_set_powergating_state(void *handle, | |
284 | enum amd_powergating_state state) | |
285 | { | |
286 | return 0; | |
287 | } | |
288 | ||
289 | /* Prototypes of private functions */ | |
290 | static int dm_early_init(void* handle); | |
291 | ||
292 | static void hotplug_notify_work_func(struct work_struct *work) | |
293 | { | |
294 | struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work); | |
295 | struct drm_device *dev = dm->ddev; | |
296 | ||
297 | drm_kms_helper_hotplug_event(dev); | |
298 | } | |
299 | ||
300 | /* Init display KMS | |
301 | * | |
302 | * Returns 0 on success | |
303 | */ | |
304 | int amdgpu_dm_init(struct amdgpu_device *adev) | |
305 | { | |
306 | struct dc_init_data init_data; | |
307 | adev->dm.ddev = adev->ddev; | |
308 | adev->dm.adev = adev; | |
309 | ||
310 | DRM_INFO("DAL is enabled\n"); | |
311 | /* Zero all the fields */ | |
312 | memset(&init_data, 0, sizeof(init_data)); | |
313 | ||
314 | /* initialize DAL's lock (for SYNC context use) */ | |
315 | spin_lock_init(&adev->dm.dal_lock); | |
316 | ||
317 | /* initialize DAL's mutex */ | |
318 | mutex_init(&adev->dm.dal_mutex); | |
319 | ||
320 | if(amdgpu_dm_irq_init(adev)) { | |
321 | DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); | |
322 | goto error; | |
323 | } | |
324 | ||
325 | init_data.asic_id.chip_family = adev->family; | |
326 | ||
327 | init_data.asic_id.pci_revision_id = adev->rev_id; | |
328 | init_data.asic_id.hw_internal_rev = adev->external_rev_id; | |
329 | ||
330 | init_data.asic_id.vram_width = adev->mc.vram_width; | |
331 | /* TODO: initialize init_data.asic_id.vram_type here!!!! */ | |
332 | init_data.asic_id.atombios_base_address = | |
333 | adev->mode_info.atom_context->bios; | |
334 | ||
335 | init_data.driver = adev; | |
336 | ||
337 | adev->dm.cgs_device = amdgpu_cgs_create_device(adev); | |
338 | ||
339 | if (!adev->dm.cgs_device) { | |
340 | DRM_ERROR("amdgpu: failed to create cgs device.\n"); | |
341 | goto error; | |
342 | } | |
343 | ||
344 | init_data.cgs_device = adev->dm.cgs_device; | |
345 | ||
346 | adev->dm.dal = NULL; | |
347 | ||
348 | init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; | |
349 | ||
350 | /* Display Core create. */ | |
351 | adev->dm.dc = dc_create(&init_data); | |
352 | ||
353 | if (!adev->dm.dc) | |
354 | DRM_INFO("Display Core failed to initialize!\n"); | |
355 | ||
356 | INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func); | |
357 | ||
358 | adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); | |
359 | if (!adev->dm.freesync_module) { | |
360 | DRM_ERROR( | |
361 | "amdgpu: failed to initialize freesync_module.\n"); | |
362 | } else | |
363 | DRM_INFO("amdgpu: freesync_module init done %p.\n", | |
364 | adev->dm.freesync_module); | |
365 | ||
366 | if (amdgpu_dm_initialize_drm_device(adev)) { | |
367 | DRM_ERROR( | |
368 | "amdgpu: failed to initialize sw for display support.\n"); | |
369 | goto error; | |
370 | } | |
371 | ||
372 | /* Update the actual number of CRTCs in use */ | |
373 | adev->mode_info.num_crtc = adev->dm.display_indexes_num; | |
374 | ||
375 | /* TODO: Add_display_info? */ | |
376 | ||
377 | /* TODO use dynamic cursor width */ | |
ce75805e AG |
378 | adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; |
379 | adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; | |
4562236b HW |
380 | |
381 | if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) { | |
382 | DRM_ERROR( | |
383 | "amdgpu: failed to initialize sw for display support.\n"); | |
384 | goto error; | |
385 | } | |
386 | ||
387 | DRM_INFO("KMS initialized.\n"); | |
388 | ||
389 | return 0; | |
390 | error: | |
391 | amdgpu_dm_fini(adev); | |
392 | ||
393 | return -1; | |
394 | } | |
395 | ||
396 | void amdgpu_dm_fini(struct amdgpu_device *adev) | |
397 | { | |
398 | amdgpu_dm_destroy_drm_device(&adev->dm); | |
399 | /* | |
400 | * TODO: pageflip, vblank interrupt | |
401 | * | |
402 | * amdgpu_dm_irq_fini(adev); | |
403 | */ | |
404 | ||
405 | if (adev->dm.cgs_device) { | |
406 | amdgpu_cgs_destroy_device(adev->dm.cgs_device); | |
407 | adev->dm.cgs_device = NULL; | |
408 | } | |
409 | if (adev->dm.freesync_module) { | |
410 | mod_freesync_destroy(adev->dm.freesync_module); | |
411 | adev->dm.freesync_module = NULL; | |
412 | } | |
413 | /* DC Destroy TODO: Replace destroy DAL */ | |
21de3396 | 414 | if (adev->dm.dc) |
4562236b | 415 | dc_destroy(&adev->dm.dc); |
4562236b HW |
416 | return; |
417 | } | |
418 | ||
419 | /* moved from amdgpu_dm_kms.c */ | |
420 | void amdgpu_dm_destroy() | |
421 | { | |
422 | } | |
423 | ||
424 | static int dm_sw_init(void *handle) | |
425 | { | |
426 | return 0; | |
427 | } | |
428 | ||
429 | static int dm_sw_fini(void *handle) | |
430 | { | |
431 | return 0; | |
432 | } | |
433 | ||
7abcf6b5 | 434 | static int detect_mst_link_for_all_connectors(struct drm_device *dev) |
4562236b HW |
435 | { |
436 | struct amdgpu_connector *aconnector; | |
437 | struct drm_connector *connector; | |
7abcf6b5 | 438 | int ret = 0; |
4562236b HW |
439 | |
440 | drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); | |
441 | ||
442 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | |
443 | aconnector = to_amdgpu_connector(connector); | |
7abcf6b5 AG |
444 | if (aconnector->dc_link->type == dc_connection_mst_branch) { |
445 | DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n", | |
446 | aconnector, aconnector->base.base.id); | |
447 | ||
448 | ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); | |
449 | if (ret < 0) { | |
450 | DRM_ERROR("DM_MST: Failed to start MST\n"); | |
451 | ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single; | |
452 | return ret; | |
4562236b | 453 | } |
7abcf6b5 | 454 | } |
4562236b HW |
455 | } |
456 | ||
457 | drm_modeset_unlock(&dev->mode_config.connection_mutex); | |
7abcf6b5 AG |
458 | return ret; |
459 | } | |
460 | ||
461 | static int dm_late_init(void *handle) | |
462 | { | |
463 | struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev; | |
464 | int r = detect_mst_link_for_all_connectors(dev); | |
465 | ||
466 | return r; | |
4562236b HW |
467 | } |
468 | ||
469 | static void s3_handle_mst(struct drm_device *dev, bool suspend) | |
470 | { | |
471 | struct amdgpu_connector *aconnector; | |
472 | struct drm_connector *connector; | |
473 | ||
474 | drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); | |
475 | ||
476 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | |
477 | aconnector = to_amdgpu_connector(connector); | |
478 | if (aconnector->dc_link->type == dc_connection_mst_branch && | |
479 | !aconnector->mst_port) { | |
480 | ||
481 | if (suspend) | |
482 | drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr); | |
483 | else | |
484 | drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr); | |
485 | } | |
486 | } | |
487 | ||
488 | drm_modeset_unlock(&dev->mode_config.connection_mutex); | |
489 | } | |
490 | ||
491 | static int dm_hw_init(void *handle) | |
492 | { | |
493 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
494 | /* Create DAL display manager */ | |
495 | amdgpu_dm_init(adev); | |
4562236b HW |
496 | amdgpu_dm_hpd_init(adev); |
497 | ||
4562236b HW |
498 | return 0; |
499 | } | |
500 | ||
501 | static int dm_hw_fini(void *handle) | |
502 | { | |
503 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
504 | ||
505 | amdgpu_dm_hpd_fini(adev); | |
506 | ||
507 | amdgpu_dm_irq_fini(adev); | |
21de3396 | 508 | amdgpu_dm_fini(adev); |
4562236b HW |
509 | return 0; |
510 | } | |
511 | ||
512 | static int dm_suspend(void *handle) | |
513 | { | |
514 | struct amdgpu_device *adev = handle; | |
515 | struct amdgpu_display_manager *dm = &adev->dm; | |
516 | int ret = 0; | |
4562236b HW |
517 | |
518 | s3_handle_mst(adev->ddev, true); | |
519 | ||
4562236b HW |
520 | amdgpu_dm_irq_suspend(adev); |
521 | ||
0a214e2f | 522 | WARN_ON(adev->dm.cached_state); |
a3621485 AG |
523 | adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev); |
524 | ||
4562236b HW |
525 | dc_set_power_state( |
526 | dm->dc, | |
a3621485 AG |
527 | DC_ACPI_CM_POWER_STATE_D3 |
528 | ); | |
4562236b HW |
529 | |
530 | return ret; | |
531 | } | |
532 | ||
533 | struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector( | |
534 | struct drm_atomic_state *state, | |
535 | struct drm_crtc *crtc, | |
536 | bool from_state_var) | |
537 | { | |
538 | uint32_t i; | |
539 | struct drm_connector_state *conn_state; | |
540 | struct drm_connector *connector; | |
541 | struct drm_crtc *crtc_from_state; | |
542 | ||
543 | for_each_connector_in_state( | |
544 | state, | |
545 | connector, | |
546 | conn_state, | |
547 | i) { | |
548 | crtc_from_state = | |
549 | from_state_var ? | |
550 | conn_state->crtc : | |
551 | connector->state->crtc; | |
552 | ||
553 | if (crtc_from_state == crtc) | |
554 | return to_amdgpu_connector(connector); | |
555 | } | |
556 | ||
557 | return NULL; | |
558 | } | |
559 | ||
4562236b HW |
560 | static int dm_resume(void *handle) |
561 | { | |
562 | struct amdgpu_device *adev = handle; | |
563 | struct amdgpu_display_manager *dm = &adev->dm; | |
564 | ||
565 | /* power on hardware */ | |
566 | dc_set_power_state( | |
567 | dm->dc, | |
a3621485 AG |
568 | DC_ACPI_CM_POWER_STATE_D0 |
569 | ); | |
4562236b HW |
570 | |
571 | return 0; | |
572 | } | |
573 | ||
574 | int amdgpu_dm_display_resume(struct amdgpu_device *adev ) | |
575 | { | |
576 | struct drm_device *ddev = adev->ddev; | |
577 | struct amdgpu_display_manager *dm = &adev->dm; | |
578 | struct amdgpu_connector *aconnector; | |
579 | struct drm_connector *connector; | |
4562236b | 580 | struct drm_crtc *crtc; |
a3621485 AG |
581 | struct drm_crtc_state *crtc_state; |
582 | int ret = 0; | |
583 | int i; | |
4562236b HW |
584 | |
585 | /* program HPD filter */ | |
586 | dc_resume(dm->dc); | |
587 | ||
588 | /* On resume we need to rewrite the MSTM control bits to enable MST */ | |
589 | s3_handle_mst(ddev, false); | |
590 | ||
591 | /* | |
592 | * early enable HPD Rx IRQ, should be done before set mode as short | |
593 | * pulse interrupts are used for MST | |
594 | */ | |
595 | amdgpu_dm_irq_resume_early(adev); | |
596 | ||
4562236b HW |
597 | /* Do detection */ |
598 | list_for_each_entry(connector, | |
599 | &ddev->mode_config.connector_list, head) { | |
600 | aconnector = to_amdgpu_connector(connector); | |
601 | ||
602 | /* | |
603 | * this is the case when traversing through already created | |
604 | * MST connectors; these should be skipped | |
605 | */ | |
606 | if (aconnector->mst_port) | |
607 | continue; | |
608 | ||
03ea364c | 609 | mutex_lock(&aconnector->hpd_lock); |
4562236b HW |
610 | dc_link_detect(aconnector->dc_link, false); |
611 | aconnector->dc_sink = NULL; | |
612 | amdgpu_dm_update_connector_after_detect(aconnector); | |
03ea364c | 613 | mutex_unlock(&aconnector->hpd_lock); |
4562236b HW |
614 | } |
615 | ||
a3621485 AG |
616 | /* Force mode set in atomic commit */ |
617 | for_each_crtc_in_state(adev->dm.cached_state, crtc, crtc_state, i) | |
618 | crtc_state->active_changed = true; | |
619 | ||
620 | ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state); | |
4562236b | 621 | |
0a214e2f AG |
622 | drm_atomic_state_put(adev->dm.cached_state); |
623 | adev->dm.cached_state = NULL; | |
624 | ||
9faa4237 | 625 | amdgpu_dm_irq_resume_late(adev); |
4562236b HW |
626 | |
627 | return ret; | |
628 | } | |
629 | ||
630 | static const struct amd_ip_funcs amdgpu_dm_funcs = { | |
631 | .name = "dm", | |
632 | .early_init = dm_early_init, | |
7abcf6b5 | 633 | .late_init = dm_late_init, |
4562236b HW |
634 | .sw_init = dm_sw_init, |
635 | .sw_fini = dm_sw_fini, | |
636 | .hw_init = dm_hw_init, | |
637 | .hw_fini = dm_hw_fini, | |
638 | .suspend = dm_suspend, | |
639 | .resume = dm_resume, | |
640 | .is_idle = dm_is_idle, | |
641 | .wait_for_idle = dm_wait_for_idle, | |
642 | .check_soft_reset = dm_check_soft_reset, | |
643 | .soft_reset = dm_soft_reset, | |
644 | .set_clockgating_state = dm_set_clockgating_state, | |
645 | .set_powergating_state = dm_set_powergating_state, | |
646 | }; | |
647 | ||
648 | const struct amdgpu_ip_block_version dm_ip_block = | |
649 | { | |
650 | .type = AMD_IP_BLOCK_TYPE_DCE, | |
651 | .major = 1, | |
652 | .minor = 0, | |
653 | .rev = 0, | |
654 | .funcs = &amdgpu_dm_funcs, | |
655 | }; | |
656 | ||
ca3268c4 HW |
657 | |
658 | struct drm_atomic_state * | |
659 | dm_atomic_state_alloc(struct drm_device *dev) | |
660 | { | |
661 | struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL); | |
662 | ||
663 | if (!state || drm_atomic_state_init(dev, &state->base) < 0) { | |
664 | kfree(state); | |
665 | return NULL; | |
666 | } | |
667 | ||
668 | return &state->base; | |
669 | } | |
670 | ||
0a323b84 AG |
671 | static void |
672 | dm_atomic_state_clear(struct drm_atomic_state *state) | |
673 | { | |
674 | struct dm_atomic_state *dm_state = to_dm_atomic_state(state); | |
675 | ||
676 | if (dm_state->context) { | |
677 | dc_release_validate_context(dm_state->context); | |
678 | dm_state->context = NULL; | |
679 | } | |
680 | ||
681 | drm_atomic_state_default_clear(state); | |
682 | } | |
683 | ||
684 | static void | |
685 | dm_atomic_state_alloc_free(struct drm_atomic_state *state) | |
686 | { | |
687 | struct dm_atomic_state *dm_state = to_dm_atomic_state(state); | |
688 | drm_atomic_state_default_release(state); | |
689 | kfree(dm_state); | |
690 | } | |
691 | ||
b3663f70 | 692 | static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { |
a49dcb88 HW |
693 | .fb_create = amdgpu_user_framebuffer_create, |
694 | .output_poll_changed = amdgpu_output_poll_changed, | |
4562236b | 695 | .atomic_check = amdgpu_dm_atomic_check, |
da5c47f6 | 696 | .atomic_commit = amdgpu_dm_atomic_commit, |
ca3268c4 | 697 | .atomic_state_alloc = dm_atomic_state_alloc, |
0a323b84 AG |
698 | .atomic_state_clear = dm_atomic_state_clear, |
699 | .atomic_state_free = dm_atomic_state_alloc_free | |
54f5499a AG |
700 | }; |
701 | ||
702 | static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { | |
703 | .atomic_commit_tail = amdgpu_dm_atomic_commit_tail | |
4562236b HW |
704 | }; |
705 | ||
706 | void amdgpu_dm_update_connector_after_detect( | |
707 | struct amdgpu_connector *aconnector) | |
708 | { | |
709 | struct drm_connector *connector = &aconnector->base; | |
710 | struct drm_device *dev = connector->dev; | |
b73a22d3 | 711 | struct dc_sink *sink; |
4562236b HW |
712 | |
713 | /* MST handled by drm_mst framework */ | |
714 | if (aconnector->mst_mgr.mst_state == true) | |
715 | return; | |
716 | ||
717 | ||
718 | sink = aconnector->dc_link->local_sink; | |
719 | ||
720 | /* The EDID mgmt connector gets its first update only in the mode_valid hook, and then | |
721 | * the connector sink is set to either the fake or the physical sink depending on link status. | |
722 | * Don't do it here during boot. | |
723 | */ | |
724 | if (aconnector->base.force != DRM_FORCE_UNSPECIFIED | |
725 | && aconnector->dc_em_sink) { | |
726 | ||
ab2541b6 | 727 | /* For S3 resume with headless, use eml_sink to fake the stream |
4562236b HW |
728 | * because on resume connector->sink is set to NULL | |
729 | */ | |
730 | mutex_lock(&dev->mode_config.mutex); | |
731 | ||
732 | if (sink) { | |
922aa1e1 | 733 | if (aconnector->dc_sink) { |
4562236b HW |
734 | amdgpu_dm_remove_sink_from_freesync_module( |
735 | connector); | |
922aa1e1 AG |
736 | /* retain and release below are used to |
737 | * bump up the refcount for the sink because the link doesn't point | |
738 | * to it anymore after disconnect, so on the next crtc-to-connector | |
739 | * reshuffle by UMD we would get an unwanted dc_sink release | |
740 | */ | |
741 | if (aconnector->dc_sink != aconnector->dc_em_sink) | |
742 | dc_sink_release(aconnector->dc_sink); | |
743 | } | |
4562236b HW |
744 | aconnector->dc_sink = sink; |
745 | amdgpu_dm_add_sink_to_freesync_module( | |
746 | connector, aconnector->edid); | |
747 | } else { | |
748 | amdgpu_dm_remove_sink_from_freesync_module(connector); | |
749 | if (!aconnector->dc_sink) | |
750 | aconnector->dc_sink = aconnector->dc_em_sink; | |
922aa1e1 AG |
751 | else if (aconnector->dc_sink != aconnector->dc_em_sink) |
752 | dc_sink_retain(aconnector->dc_sink); | |
4562236b HW |
753 | } |
754 | ||
755 | mutex_unlock(&dev->mode_config.mutex); | |
756 | return; | |
757 | } | |
758 | ||
759 | /* | |
760 | * TODO: temporary guard to look for proper fix | |
761 | * if this sink is MST sink, we should not do anything | |
762 | */ | |
763 | if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) | |
764 | return; | |
765 | ||
766 | if (aconnector->dc_sink == sink) { | |
767 | /* We got a DP short pulse (Link Loss, DP CTS, etc...). | |
768 | * Do nothing!! */ | |
769 | DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n", | |
770 | aconnector->connector_id); | |
771 | return; | |
772 | } | |
773 | ||
774 | DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", | |
775 | aconnector->connector_id, aconnector->dc_sink, sink); | |
776 | ||
777 | mutex_lock(&dev->mode_config.mutex); | |
778 | ||
779 | /* 1. Update status of the drm connector | |
780 | * 2. Send an event and let userspace tell us what to do */ | |
781 | if (sink) { | |
782 | /* TODO: check if we still need the S3 mode update workaround. | |
783 | * If yes, put it here. */ | |
784 | if (aconnector->dc_sink) | |
785 | amdgpu_dm_remove_sink_from_freesync_module( | |
786 | connector); | |
787 | ||
788 | aconnector->dc_sink = sink; | |
789 | if (sink->dc_edid.length == 0) | |
790 | aconnector->edid = NULL; | |
791 | else { | |
792 | aconnector->edid = | |
793 | (struct edid *) sink->dc_edid.raw_edid; | |
794 | ||
795 | ||
796 | drm_mode_connector_update_edid_property(connector, | |
797 | aconnector->edid); | |
798 | } | |
799 | amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid); | |
800 | ||
801 | } else { | |
802 | amdgpu_dm_remove_sink_from_freesync_module(connector); | |
803 | drm_mode_connector_update_edid_property(connector, NULL); | |
804 | aconnector->num_modes = 0; | |
805 | aconnector->dc_sink = NULL; | |
806 | } | |
807 | ||
808 | mutex_unlock(&dev->mode_config.mutex); | |
809 | } | |
810 | ||
811 | static void handle_hpd_irq(void *param) | |
812 | { | |
813 | struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param; | |
814 | struct drm_connector *connector = &aconnector->base; | |
815 | struct drm_device *dev = connector->dev; | |
816 | ||
817 | /* In case of failure or MST there is no need to update the connector status or notify the OS | |
818 | * since (for the MST case) MST does this in its own context. | |
819 | */ | |
820 | mutex_lock(&aconnector->hpd_lock); | |
821 | if (dc_link_detect(aconnector->dc_link, false)) { | |
822 | amdgpu_dm_update_connector_after_detect(aconnector); | |
823 | ||
824 | ||
825 | drm_modeset_lock_all(dev); | |
826 | dm_restore_drm_connector_state(dev, connector); | |
827 | drm_modeset_unlock_all(dev); | |
828 | ||
829 | if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) | |
830 | drm_kms_helper_hotplug_event(dev); | |
831 | } | |
832 | mutex_unlock(&aconnector->hpd_lock); | |
833 | ||
834 | } | |
835 | ||
836 | static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector) | |
837 | { | |
838 | uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; | |
839 | uint8_t dret; | |
840 | bool new_irq_handled = false; | |
841 | int dpcd_addr; | |
842 | int dpcd_bytes_to_read; | |
843 | ||
844 | const int max_process_count = 30; | |
845 | int process_count = 0; | |
846 | ||
847 | const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link); | |
848 | ||
849 | if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) { | |
850 | dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT; | |
851 | /* DPCD 0x200 - 0x201 for downstream IRQ */ | |
852 | dpcd_addr = DP_SINK_COUNT; | |
853 | } else { | |
854 | dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI; | |
855 | /* DPCD 0x2002 - 0x2005 for downstream IRQ */ | |
856 | dpcd_addr = DP_SINK_COUNT_ESI; | |
857 | } | |
858 | ||
859 | dret = drm_dp_dpcd_read( | |
860 | &aconnector->dm_dp_aux.aux, | |
861 | dpcd_addr, | |
862 | esi, | |
863 | dpcd_bytes_to_read); | |
864 | ||
865 | while (dret == dpcd_bytes_to_read && | |
866 | process_count < max_process_count) { | |
867 | uint8_t retry; | |
868 | dret = 0; | |
869 | ||
870 | process_count++; | |
871 | ||
872 | DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]); | |
4562236b HW |
873 | /* handle HPD short pulse irq */ |
874 | if (aconnector->mst_mgr.mst_state) | |
875 | drm_dp_mst_hpd_irq( | |
876 | &aconnector->mst_mgr, | |
877 | esi, | |
878 | &new_irq_handled); | |
4562236b HW |
879 | |
880 | if (new_irq_handled) { | |
881 | /* ACK at DPCD to notify downstream */ | |
882 | const int ack_dpcd_bytes_to_write = | |
883 | dpcd_bytes_to_read - 1; | |
884 | ||
885 | for (retry = 0; retry < 3; retry++) { | |
886 | uint8_t wret; | |
887 | ||
888 | wret = drm_dp_dpcd_write( | |
889 | &aconnector->dm_dp_aux.aux, | |
890 | dpcd_addr + 1, | |
891 | &esi[1], | |
892 | ack_dpcd_bytes_to_write); | |
893 | if (wret == ack_dpcd_bytes_to_write) | |
894 | break; | |
895 | } | |
896 | ||
897 | /* check if there is a new irq to be handled */ | |
898 | dret = drm_dp_dpcd_read( | |
899 | &aconnector->dm_dp_aux.aux, | |
900 | dpcd_addr, | |
901 | esi, | |
902 | dpcd_bytes_to_read); | |
903 | ||
904 | new_irq_handled = false; | |
905 | } else | |
906 | break; | |
907 | } | |
908 | ||
909 | if (process_count == max_process_count) | |
910 | DRM_DEBUG_KMS("Loop exceeded max iterations\n"); | |
911 | } | |
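The loop above follows a bounded read-ESI / handle / ACK / re-read pattern. Below is a minimal standalone sketch of that control flow with hypothetical stand-in helpers in place of the DPCD accessors (illustrative only, not the driver's code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { ESI_LEN = 3, MAX_ITER = 30 };

    static int pending = 2; /* pretend two IRQ events are queued at the sink */

    static int read_esi(uint8_t *esi, int len)
    {
    	esi[0] = pending > 0 ? 0x01 : 0x00; /* fake "IRQ pending" flag */
    	return len;                         /* report a full read */
    }

    static bool handle_esi(const uint8_t *esi)
    {
    	return esi[0] != 0; /* an IRQ was handled only if something was flagged */
    }

    static void ack_esi(int len)
    {
    	(void)len;
    	if (pending > 0)
    		pending--; /* ACK clears one queued event */
    }

    int main(void)
    {
    	uint8_t esi[ESI_LEN] = { 0 };
    	int iter = 0;

    	while (read_esi(esi, ESI_LEN) == ESI_LEN && iter < MAX_ITER) {
    		iter++;
    		if (!handle_esi(esi))
    			break;          /* nothing handled: stop polling */
    		ack_esi(ESI_LEN);       /* ACK so the sink can raise the next event */
    	}
    	printf("processed %d iterations\n", iter);
    	return 0;
    }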
912 | ||
913 | static void handle_hpd_rx_irq(void *param) | |
914 | { | |
915 | struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param; | |
916 | struct drm_connector *connector = &aconnector->base; | |
917 | struct drm_device *dev = connector->dev; | |
918 | const struct dc_link *dc_link = aconnector->dc_link; | |
919 | bool is_mst_root_connector = aconnector->mst_mgr.mst_state; | |
920 | ||
921 | /* TODO: Temporarily add a mutex so the hpd interrupt does not hit a gpio | |
922 | * conflict; after the i2c helper is implemented, this mutex should be | |
923 | * retired. | |
924 | */ | |
925 | if (aconnector->dc_link->type != dc_connection_mst_branch) | |
926 | mutex_lock(&aconnector->hpd_lock); | |
927 | ||
8ee65d7c | 928 | if (dc_link_handle_hpd_rx_irq(aconnector->dc_link, NULL) && |
4562236b HW |
929 | !is_mst_root_connector) { |
930 | /* Downstream Port status changed. */ | |
931 | if (dc_link_detect(aconnector->dc_link, false)) { | |
932 | amdgpu_dm_update_connector_after_detect(aconnector); | |
933 | ||
934 | ||
935 | drm_modeset_lock_all(dev); | |
936 | dm_restore_drm_connector_state(dev, connector); | |
937 | drm_modeset_unlock_all(dev); | |
938 | ||
939 | drm_kms_helper_hotplug_event(dev); | |
940 | } | |
941 | } | |
942 | if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || | |
943 | (dc_link->type == dc_connection_mst_branch)) | |
944 | dm_handle_hpd_rx_irq(aconnector); | |
945 | ||
946 | if (aconnector->dc_link->type != dc_connection_mst_branch) | |
947 | mutex_unlock(&aconnector->hpd_lock); | |
948 | } | |
949 | ||
950 | static void register_hpd_handlers(struct amdgpu_device *adev) | |
951 | { | |
952 | struct drm_device *dev = adev->ddev; | |
953 | struct drm_connector *connector; | |
954 | struct amdgpu_connector *aconnector; | |
955 | const struct dc_link *dc_link; | |
956 | struct dc_interrupt_params int_params = {0}; | |
957 | ||
958 | int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; | |
959 | int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; | |
960 | ||
961 | list_for_each_entry(connector, | |
962 | &dev->mode_config.connector_list, head) { | |
963 | ||
964 | aconnector = to_amdgpu_connector(connector); | |
965 | dc_link = aconnector->dc_link; | |
966 | ||
967 | if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { | |
968 | int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; | |
969 | int_params.irq_source = dc_link->irq_source_hpd; | |
970 | ||
971 | amdgpu_dm_irq_register_interrupt(adev, &int_params, | |
972 | handle_hpd_irq, | |
973 | (void *) aconnector); | |
974 | } | |
975 | ||
976 | if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { | |
977 | ||
978 | /* Also register for DP short pulse (hpd_rx). */ | |
979 | int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; | |
980 | int_params.irq_source = dc_link->irq_source_hpd_rx; | |
981 | ||
982 | amdgpu_dm_irq_register_interrupt(adev, &int_params, | |
983 | handle_hpd_rx_irq, | |
984 | (void *) aconnector); | |
985 | } | |
986 | } | |
987 | } | |
988 | ||
989 | /* Register IRQ sources and initialize IRQ callbacks */ | |
990 | static int dce110_register_irq_handlers(struct amdgpu_device *adev) | |
991 | { | |
992 | struct dc *dc = adev->dm.dc; | |
993 | struct common_irq_params *c_irq_params; | |
994 | struct dc_interrupt_params int_params = {0}; | |
995 | int r; | |
996 | int i; | |
2c8ad2d5 AD |
997 | unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY; |
998 | ||
ff5ef992 AD |
999 | if (adev->asic_type == CHIP_VEGA10 || |
1000 | adev->asic_type == CHIP_RAVEN) | |
2c8ad2d5 | 1001 | client_id = AMDGPU_IH_CLIENTID_DCE; |
4562236b HW |
1002 | |
1003 | int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; | |
1004 | int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; | |
1005 | ||
1006 | /* Actions of amdgpu_irq_add_id(): | |
1007 | * 1. Register a set() function with base driver. | |
1008 | * Base driver will call set() function to enable/disable an | |
1009 | * interrupt in DC hardware. | |
1010 | * 2. Register amdgpu_dm_irq_handler(). | |
1011 | * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts | |
1012 | * coming from DC hardware. | |
1013 | * amdgpu_dm_irq_handler() will re-direct the interrupt to DC | |
1014 | * for acknowledging and handling. */ | |
1015 | ||
b57de80a | 1016 | /* Use VBLANK interrupt */ |
e9029155 | 1017 | for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { |
2c8ad2d5 | 1018 | r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq); |
4562236b HW |
1019 | if (r) { |
1020 | DRM_ERROR("Failed to add crtc irq id!\n"); | |
1021 | return r; | |
1022 | } | |
1023 | ||
1024 | int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; | |
1025 | int_params.irq_source = | |
3d761e79 | 1026 | dc_interrupt_to_irq_source(dc, i, 0); |
4562236b | 1027 | |
b57de80a | 1028 | c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; |
4562236b HW |
1029 | |
1030 | c_irq_params->adev = adev; | |
1031 | c_irq_params->irq_src = int_params.irq_source; | |
1032 | ||
1033 | amdgpu_dm_irq_register_interrupt(adev, &int_params, | |
1034 | dm_crtc_high_irq, c_irq_params); | |
1035 | } | |
1036 | ||
3d761e79 | 1037 | /* Use GRPH_PFLIP interrupt */ |
4562236b HW |
1038 | for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; |
1039 | i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { | |
2c8ad2d5 | 1040 | r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); |
4562236b HW |
1041 | if (r) { |
1042 | DRM_ERROR("Failed to add page flip irq id!\n"); | |
1043 | return r; | |
1044 | } | |
1045 | ||
1046 | int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; | |
1047 | int_params.irq_source = | |
1048 | dc_interrupt_to_irq_source(dc, i, 0); | |
1049 | ||
1050 | c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; | |
1051 | ||
1052 | c_irq_params->adev = adev; | |
1053 | c_irq_params->irq_src = int_params.irq_source; | |
1054 | ||
1055 | amdgpu_dm_irq_register_interrupt(adev, &int_params, | |
1056 | dm_pflip_high_irq, c_irq_params); | |
1057 | ||
1058 | } | |
1059 | ||
1060 | /* HPD */ | |
2c8ad2d5 AD |
1061 | r = amdgpu_irq_add_id(adev, client_id, |
1062 | VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); | |
4562236b HW |
1063 | if (r) { |
1064 | DRM_ERROR("Failed to add hpd irq id!\n"); | |
1065 | return r; | |
1066 | } | |
1067 | ||
1068 | register_hpd_handlers(adev); | |
1069 | ||
1070 | return 0; | |
1071 | } | |
1072 | ||
ff5ef992 AD |
1073 | #if defined(CONFIG_DRM_AMD_DC_DCN1_0) |
1074 | /* Register IRQ sources and initialize IRQ callbacks */ | |
1075 | static int dcn10_register_irq_handlers(struct amdgpu_device *adev) | |
1076 | { | |
1077 | struct dc *dc = adev->dm.dc; | |
1078 | struct common_irq_params *c_irq_params; | |
1079 | struct dc_interrupt_params int_params = {0}; | |
1080 | int r; | |
1081 | int i; | |
1082 | ||
1083 | int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; | |
1084 | int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; | |
1085 | ||
1086 | /* Actions of amdgpu_irq_add_id(): | |
1087 | * 1. Register a set() function with base driver. | |
1088 | * Base driver will call set() function to enable/disable an | |
1089 | * interrupt in DC hardware. | |
1090 | * 2. Register amdgpu_dm_irq_handler(). | |
1091 | * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts | |
1092 | * coming from DC hardware. | |
1093 | * amdgpu_dm_irq_handler() will re-direct the interrupt to DC | |
1094 | * for acknowledging and handling. | |
1095 | * */ | |
1096 | ||
1097 | /* Use VSTARTUP interrupt */ | |
1098 | for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; | |
1099 | i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; | |
1100 | i++) { | |
1101 | r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq); | |
1102 | ||
1103 | if (r) { | |
1104 | DRM_ERROR("Failed to add crtc irq id!\n"); | |
1105 | return r; | |
1106 | } | |
1107 | ||
1108 | int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; | |
1109 | int_params.irq_source = | |
1110 | dc_interrupt_to_irq_source(dc, i, 0); | |
1111 | ||
1112 | c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; | |
1113 | ||
1114 | c_irq_params->adev = adev; | |
1115 | c_irq_params->irq_src = int_params.irq_source; | |
1116 | ||
1117 | amdgpu_dm_irq_register_interrupt(adev, &int_params, | |
1118 | dm_crtc_high_irq, c_irq_params); | |
1119 | } | |
1120 | ||
1121 | /* Use GRPH_PFLIP interrupt */ | |
1122 | for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; | |
1123 | i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1; | |
1124 | i++) { | |
1125 | r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq); | |
1126 | if (r) { | |
1127 | DRM_ERROR("Failed to add page flip irq id!\n"); | |
1128 | return r; | |
1129 | } | |
1130 | ||
1131 | int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; | |
1132 | int_params.irq_source = | |
1133 | dc_interrupt_to_irq_source(dc, i, 0); | |
1134 | ||
1135 | c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; | |
1136 | ||
1137 | c_irq_params->adev = adev; | |
1138 | c_irq_params->irq_src = int_params.irq_source; | |
1139 | ||
1140 | amdgpu_dm_irq_register_interrupt(adev, &int_params, | |
1141 | dm_pflip_high_irq, c_irq_params); | |
1142 | ||
1143 | } | |
1144 | ||
1145 | /* HPD */ | |
1146 | r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, | |
1147 | &adev->hpd_irq); | |
1148 | if (r) { | |
1149 | DRM_ERROR("Failed to add hpd irq id!\n"); | |
1150 | return r; | |
1151 | } | |
1152 | ||
1153 | register_hpd_handlers(adev); | |
1154 | ||
1155 | return 0; | |
1156 | } | |
1157 | #endif | |
1158 | ||
4562236b HW |
1159 | static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) |
1160 | { | |
1161 | int r; | |
1162 | ||
1163 | adev->mode_info.mode_config_initialized = true; | |
1164 | ||
4562236b | 1165 | adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; |
54f5499a | 1166 | adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs; |
4562236b HW |
1167 | |
1168 | adev->ddev->mode_config.max_width = 16384; | |
1169 | adev->ddev->mode_config.max_height = 16384; | |
1170 | ||
1171 | adev->ddev->mode_config.preferred_depth = 24; | |
1172 | adev->ddev->mode_config.prefer_shadow = 1; | |
1173 | /* indicate support of immediate flip */ | |
1174 | adev->ddev->mode_config.async_page_flip = true; | |
1175 | ||
1176 | adev->ddev->mode_config.fb_base = adev->mc.aper_base; | |
1177 | ||
1178 | r = amdgpu_modeset_create_props(adev); | |
1179 | if (r) | |
1180 | return r; | |
1181 | ||
1182 | return 0; | |
1183 | } | |
1184 | ||
1185 | #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ | |
1186 | defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) | |
1187 | ||
1188 | static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) | |
1189 | { | |
1190 | struct amdgpu_display_manager *dm = bl_get_data(bd); | |
1191 | ||
1192 | if (dc_link_set_backlight_level(dm->backlight_link, | |
1193 | bd->props.brightness, 0, 0)) | |
1194 | return 0; | |
1195 | else | |
1196 | return 1; | |
1197 | } | |
1198 | ||
1199 | static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) | |
1200 | { | |
1201 | return bd->props.brightness; | |
1202 | } | |
1203 | ||
1204 | static const struct backlight_ops amdgpu_dm_backlight_ops = { | |
1205 | .get_brightness = amdgpu_dm_backlight_get_brightness, | |
1206 | .update_status = amdgpu_dm_backlight_update_status, | |
1207 | }; | |
1208 | ||
1209 | void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) | |
1210 | { | |
1211 | char bl_name[16]; | |
1212 | struct backlight_properties props = { 0 }; | |
1213 | ||
1214 | props.max_brightness = AMDGPU_MAX_BL_LEVEL; | |
1215 | props.type = BACKLIGHT_RAW; | |
1216 | ||
1217 | snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", | |
1218 | dm->adev->ddev->primary->index); | |
1219 | ||
1220 | dm->backlight_dev = backlight_device_register(bl_name, | |
1221 | dm->adev->ddev->dev, | |
1222 | dm, | |
1223 | &amdgpu_dm_backlight_ops, | |
1224 | &props); | |
1225 | ||
1226 | if (NULL == dm->backlight_dev) | |
1227 | DRM_ERROR("DM: Backlight registration failed!\n"); | |
1228 | else | |
1229 | DRM_INFO("DM: Registered Backlight device: %s\n", bl_name); | |
1230 | } | |
1231 | ||
1232 | #endif | |
1233 | ||
1234 | /* In this architecture, the association | |
1235 | * connector -> encoder -> crtc | |
1236 | * is not really required. The crtc and connector will hold the | |
1237 | * display_index as an abstraction to use with the DAL component | |
1238 | * | |
1239 | * Returns 0 on success | |
1240 | */ | |
1241 | int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) | |
1242 | { | |
1243 | struct amdgpu_display_manager *dm = &adev->dm; | |
1244 | uint32_t i; | |
f2a0f5e6 HW |
1245 | struct amdgpu_connector *aconnector = NULL; |
1246 | struct amdgpu_encoder *aencoder = NULL; | |
d4e13b0d | 1247 | struct amdgpu_mode_info *mode_info = &adev->mode_info; |
4562236b | 1248 | uint32_t link_cnt; |
92f3ac40 | 1249 | unsigned long possible_crtcs; |
4562236b HW |
1250 | |
1251 | link_cnt = dm->dc->caps.max_links; | |
4562236b HW |
1252 | if (amdgpu_dm_mode_config_init(dm->adev)) { |
1253 | DRM_ERROR("DM: Failed to initialize mode config\n"); | |
f2a0f5e6 | 1254 | return -1; |
4562236b HW |
1255 | } |
1256 | ||
d4e13b0d AD |
1257 | for (i = 0; i < dm->dc->caps.max_surfaces; i++) { |
1258 | mode_info->planes[i] = kzalloc(sizeof(struct amdgpu_plane), | |
1259 | GFP_KERNEL); | |
1260 | if (!mode_info->planes[i]) { | |
1261 | DRM_ERROR("KMS: Failed to allocate surface\n"); | |
1262 | goto fail_free_planes; | |
1263 | } | |
1605b3be | 1264 | mode_info->planes[i]->base.type = mode_info->plane_type[i]; |
92f3ac40 LSL |
1265 | |
1266 | /* | |
1267 | * HACK: IGT tests expect that each plane can only have | |
1268 | * one possible CRTC. For now, set one CRTC for each | |
1269 | * plane that is not an underlay, but still allow multiple | |
1270 | * CRTCs for underlay planes. | |
1271 | */ | |
1272 | possible_crtcs = 1 << i; | |
1273 | if (i >= dm->dc->caps.max_streams) | |
1274 | possible_crtcs = 0xff; | |
1275 | ||
1276 | if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) { | |
d4e13b0d AD |
1277 | DRM_ERROR("KMS: Failed to initialize plane\n"); |
1278 | goto fail_free_planes; | |
1279 | } | |
1280 | } | |
4562236b | 1281 | |
d4e13b0d AD |
1282 | for (i = 0; i < dm->dc->caps.max_streams; i++) |
1283 | if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) { | |
4562236b | 1284 | DRM_ERROR("KMS: Failed to initialize crtc\n"); |
d4e13b0d | 1285 | goto fail_free_planes; |
4562236b | 1286 | } |
4562236b | 1287 | |
ab2541b6 | 1288 | dm->display_indexes_num = dm->dc->caps.max_streams; |
4562236b HW |
1289 | |
1290 | /* loops over all connectors on the board */ | |
1291 | for (i = 0; i < link_cnt; i++) { | |
1292 | ||
1293 | if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { | |
1294 | DRM_ERROR( | |
1295 | "KMS: Cannot support more than %d display indexes\n", | |
1296 | AMDGPU_DM_MAX_DISPLAY_INDEX); | |
1297 | continue; | |
1298 | } | |
1299 | ||
1300 | aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); | |
1301 | if (!aconnector) | |
f2a0f5e6 | 1302 | goto fail_free_planes; |
4562236b HW |
1303 | |
1304 | aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL); | |
1305 | if (!aencoder) { | |
1306 | goto fail_free_connector; | |
1307 | } | |
1308 | ||
1309 | if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { | |
1310 | DRM_ERROR("KMS: Failed to initialize encoder\n"); | |
1311 | goto fail_free_encoder; | |
1312 | } | |
1313 | ||
1314 | if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { | |
1315 | DRM_ERROR("KMS: Failed to initialize connector\n"); | |
f2a0f5e6 | 1316 | goto fail_free_encoder; |
4562236b HW |
1317 | } |
1318 | ||
1319 | if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true)) | |
1320 | amdgpu_dm_update_connector_after_detect(aconnector); | |
1321 | } | |
1322 | ||
1323 | /* Software is initialized. Now we can register interrupt handlers. */ | |
1324 | switch (adev->asic_type) { | |
1325 | case CHIP_BONAIRE: | |
1326 | case CHIP_HAWAII: | |
1327 | case CHIP_TONGA: | |
1328 | case CHIP_FIJI: | |
1329 | case CHIP_CARRIZO: | |
1330 | case CHIP_STONEY: | |
1331 | case CHIP_POLARIS11: | |
1332 | case CHIP_POLARIS10: | |
b264d345 | 1333 | case CHIP_POLARIS12: |
2c8ad2d5 | 1334 | case CHIP_VEGA10: |
4562236b HW |
1335 | if (dce110_register_irq_handlers(dm->adev)) { |
1336 | DRM_ERROR("DM: Failed to initialize IRQ\n"); | |
d4e13b0d | 1337 | goto fail_free_encoder; |
4562236b HW |
1338 | } |
1339 | break; | |
ff5ef992 AD |
1340 | #if defined(CONFIG_DRM_AMD_DC_DCN1_0) |
1341 | case CHIP_RAVEN: | |
1342 | if (dcn10_register_irq_handlers(dm->adev)) { | |
1343 | DRM_ERROR("DM: Failed to initialize IRQ\n"); | |
1344 | goto fail_free_encoder; | |
1345 | } | |
1346 | break; | |
1347 | #endif | |
4562236b HW |
1348 | default: |
1349 | DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type); | |
d4e13b0d | 1350 | goto fail_free_encoder; |
4562236b HW |
1351 | } |
1352 | ||
1353 | drm_mode_config_reset(dm->ddev); | |
1354 | ||
1355 | return 0; | |
1356 | fail_free_encoder: | |
1357 | kfree(aencoder); | |
1358 | fail_free_connector: | |
1359 | kfree(aconnector); | |
d4e13b0d AD |
1360 | fail_free_planes: |
1361 | for (i = 0; i < dm->dc->caps.max_surfaces; i++) | |
1362 | kfree(mode_info->planes[i]); | |
4562236b HW |
1363 | return -1; |
1364 | } | |
1365 | ||
1366 | void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) | |
1367 | { | |
1368 | drm_mode_config_cleanup(dm->ddev); | |
1369 | return; | |
1370 | } | |
1371 | ||
1372 | /****************************************************************************** | |
1373 | * amdgpu_display_funcs functions | |
1374 | *****************************************************************************/ | |
1375 | ||
1376 | /** | |
1377 | * dm_bandwidth_update - program display watermarks | |
1378 | * | |
1379 | * @adev: amdgpu_device pointer | |
1380 | * | |
1381 | * Calculate and program the display watermarks and line buffer allocation. | |
1382 | */ | |
1383 | static void dm_bandwidth_update(struct amdgpu_device *adev) | |
1384 | { | |
49c07a99 | 1385 | /* TODO: implement later */ |
4562236b HW |
1386 | } |
1387 | ||
1388 | static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder, | |
1389 | u8 level) | |
1390 | { | |
1391 | /* TODO: translate amdgpu_encoder to display_index and call DAL */ | |
4562236b HW |
1392 | } |
1393 | ||
1394 | static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder) | |
1395 | { | |
1396 | /* TODO: translate amdgpu_encoder to display_index and call DAL */ | |
4562236b HW |
1397 | return 0; |
1398 | } | |
1399 | ||
4562236b HW |
1400 | static int amdgpu_notify_freesync(struct drm_device *dev, void *data, |
1401 | struct drm_file *filp) | |
1402 | { | |
1403 | struct mod_freesync_params freesync_params; | |
ab2541b6 | 1404 | uint8_t num_streams; |
4562236b | 1405 | uint8_t i; |
4562236b HW |
1406 | |
1407 | struct amdgpu_device *adev = dev->dev_private; | |
1408 | int r = 0; | |
1409 | ||
1410 | /* Get freesync enable flag from DRM */ | |
1411 | ||
ab2541b6 | 1412 | num_streams = dc_get_current_stream_count(adev->dm.dc); |
4562236b | 1413 | |
ab2541b6 | 1414 | for (i = 0; i < num_streams; i++) { |
4fa086b9 | 1415 | struct dc_stream *stream; |
ab2541b6 | 1416 | stream = dc_get_stream_at_index(adev->dm.dc, i); |
4562236b HW |
1417 | |
1418 | mod_freesync_update_state(adev->dm.freesync_module, | |
ab2541b6 | 1419 | &stream, 1, &freesync_params); |
4562236b HW |
1420 | } |
1421 | ||
1422 | return r; | |
1423 | } | |
1424 | ||
39cc5be2 | 1425 | static const struct amdgpu_display_funcs dm_display_funcs = { |
4562236b HW |
1426 | .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ |
1427 | .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ | |
1428 | .vblank_wait = NULL, | |
1429 | .backlight_set_level = | |
1430 | dm_set_backlight_level,/* called unconditionally */ | |
1431 | .backlight_get_level = | |
1432 | dm_get_backlight_level,/* called unconditionally */ | |
1433 | .hpd_sense = NULL,/* called unconditionally */ | |
1434 | .hpd_set_polarity = NULL, /* called unconditionally */ | |
1435 | .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */ | |
4562236b HW |
1436 | .page_flip_get_scanoutpos = |
1437 | dm_crtc_get_scanoutpos,/* called unconditionally */ | |
1438 | .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ | |
1439 | .add_connector = NULL, /* VBIOS parsing. DAL does it. */ | |
1440 | .notify_freesync = amdgpu_notify_freesync, | |
1441 | ||
1442 | }; | |
1443 | ||
2c8ad2d5 | 1444 | |
4562236b HW |
1445 | #if defined(CONFIG_DEBUG_KERNEL_DC) |
1446 | ||
1447 | static ssize_t s3_debug_store( | |
1448 | struct device *device, | |
1449 | struct device_attribute *attr, | |
1450 | const char *buf, | |
1451 | size_t count) | |
1452 | { | |
1453 | int ret; | |
1454 | int s3_state; | |
1455 | struct pci_dev *pdev = to_pci_dev(device); | |
1456 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | |
1457 | struct amdgpu_device *adev = drm_dev->dev_private; | |
1458 | ||
1459 | ret = kstrtoint(buf, 0, &s3_state); | |
1460 | ||
1461 | if (ret == 0) { | |
1462 | if (s3_state) { | |
1463 | dm_resume(adev); | |
1464 | amdgpu_dm_display_resume(adev); | |
1465 | drm_kms_helper_hotplug_event(adev->ddev); | |
1466 | } else | |
1467 | dm_suspend(adev); | |
1468 | } | |
1469 | ||
1470 | return ret == 0 ? count : 0; | |
1471 | } | |
1472 | ||
1473 | DEVICE_ATTR_WO(s3_debug); | |
1474 | ||
1475 | #endif | |
1476 | ||
1477 | static int dm_early_init(void *handle) | |
1478 | { | |
1479 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
1480 | ||
d7ec53d9 | 1481 | adev->ddev->driver->driver_features |= DRIVER_ATOMIC; |
4562236b HW |
1482 | amdgpu_dm_set_irq_funcs(adev); |
1483 | ||
1484 | switch (adev->asic_type) { | |
1485 | case CHIP_BONAIRE: | |
1486 | case CHIP_HAWAII: | |
1487 | adev->mode_info.num_crtc = 6; | |
1488 | adev->mode_info.num_hpd = 6; | |
1489 | adev->mode_info.num_dig = 6; | |
d4e13b0d | 1490 | adev->mode_info.plane_type = dm_surfaces_type_default; |
4562236b HW |
1491 | break; |
1492 | case CHIP_FIJI: | |
1493 | case CHIP_TONGA: | |
1494 | adev->mode_info.num_crtc = 6; | |
1495 | adev->mode_info.num_hpd = 6; | |
1496 | adev->mode_info.num_dig = 7; | |
d4e13b0d | 1497 | adev->mode_info.plane_type = dm_surfaces_type_default; |
4562236b HW |
1498 | break; |
1499 | case CHIP_CARRIZO: | |
1500 | adev->mode_info.num_crtc = 3; | |
1501 | adev->mode_info.num_hpd = 6; | |
1502 | adev->mode_info.num_dig = 9; | |
d4e13b0d | 1503 | adev->mode_info.plane_type = dm_surfaces_type_carizzo; |
4562236b HW |
1504 | break; |
1505 | case CHIP_STONEY: | |
1506 | adev->mode_info.num_crtc = 2; | |
1507 | adev->mode_info.num_hpd = 6; | |
1508 | adev->mode_info.num_dig = 9; | |
d4e13b0d | 1509 | adev->mode_info.plane_type = dm_surfaces_type_stoney; |
4562236b HW |
1510 | break; |
1511 | case CHIP_POLARIS11: | |
b264d345 | 1512 | case CHIP_POLARIS12: |
4562236b HW |
1513 | adev->mode_info.num_crtc = 5; |
1514 | adev->mode_info.num_hpd = 5; | |
1515 | adev->mode_info.num_dig = 5; | |
d4e13b0d | 1516 | adev->mode_info.plane_type = dm_surfaces_type_default; |
4562236b HW |
1517 | break; |
1518 | case CHIP_POLARIS10: | |
1519 | adev->mode_info.num_crtc = 6; | |
1520 | adev->mode_info.num_hpd = 6; | |
1521 | adev->mode_info.num_dig = 6; | |
d4e13b0d | 1522 | adev->mode_info.plane_type = dm_surfaces_type_default; |
4562236b | 1523 | break; |
2c8ad2d5 AD |
1524 | case CHIP_VEGA10: |
1525 | adev->mode_info.num_crtc = 6; | |
1526 | adev->mode_info.num_hpd = 6; | |
1527 | adev->mode_info.num_dig = 6; | |
6f43fd62 | 1528 | adev->mode_info.plane_type = dm_surfaces_type_default; |
2c8ad2d5 | 1529 | break; |
ff5ef992 AD |
1530 | #if defined(CONFIG_DRM_AMD_DC_DCN1_0) |
1531 | case CHIP_RAVEN: | |
1532 | adev->mode_info.num_crtc = 4; | |
1533 | adev->mode_info.num_hpd = 4; | |
1534 | adev->mode_info.num_dig = 4; | |
1535 | adev->mode_info.plane_type = dm_surfaces_type_default; | |
1536 | break; | |
1537 | #endif | |
4562236b HW |
1538 | default: |
1539 | DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type); | |
1540 | return -EINVAL; | |
1541 | } | |
1542 | ||
39cc5be2 AD |
1543 | if (adev->mode_info.funcs == NULL) |
1544 | adev->mode_info.funcs = &dm_display_funcs; | |
1545 | ||
4562236b HW |
1546 | /* Note: Do NOT change adev->audio_endpt_rreg and |
1547 | * adev->audio_endpt_wreg because they are initialised in | |
1548 | * amdgpu_device_init() */ | |
1549 | #if defined(CONFIG_DEBUG_KERNEL_DC) | |
1550 | device_create_file( | |
1551 | adev->ddev->dev, | |
1552 | &dev_attr_s3_debug); | |
1553 | #endif | |
1554 | ||
1555 | return 0; | |
1556 | } | |
1557 | ||
1558 | bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm) | |
1559 | { | |
1560 | /* TODO */ | |
1561 | return true; | |
1562 | } | |
1563 | ||
1564 | bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm) | |
1565 | { | |
e7b07cee HW |
1566 | /* TODO */ return true; |
1567 | } | |
1568 | ||
1569 | ||
1570 | struct dm_connector_state { | |
1571 | struct drm_connector_state base; | |
1572 | ||
1573 | enum amdgpu_rmx_type scaling; | |
1574 | uint8_t underscan_vborder; | |
1575 | uint8_t underscan_hborder; | |
1576 | bool underscan_enable; | |
1577 | }; | |
1578 | ||
1579 | #define to_dm_connector_state(x)\ | |
1580 | container_of((x), struct dm_connector_state, base) | |
1581 | ||
1582 | static bool modeset_required(struct drm_crtc_state *crtc_state) | |
1583 | { | |
1584 | if (!drm_atomic_crtc_needs_modeset(crtc_state)) | |
1585 | return false; | |
1586 | ||
1587 | if (!crtc_state->enable) | |
1588 | return false; | |
1589 | ||
1590 | return crtc_state->active; | |
1591 | } | |
1592 | ||
1593 | static bool modereset_required(struct drm_crtc_state *crtc_state) | |
1594 | { | |
1595 | if (!drm_atomic_crtc_needs_modeset(crtc_state)) | |
1596 | return false; | |
1597 | ||
1598 | return !crtc_state->enable || !crtc_state->active; | |
1599 | } | |
1600 | ||
1601 | void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) | |
1602 | { | |
1603 | drm_encoder_cleanup(encoder); | |
1604 | kfree(encoder); | |
1605 | } | |
1606 | ||
1607 | static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { | |
1608 | .destroy = amdgpu_dm_encoder_destroy, | |
1609 | }; | |
1610 | ||
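/*
 * Program the hardware cursor through DC: cache the cursor size on the
 * amdgpu_crtc, describe the cursor BO (address, size, pre-multiplied ARGB)
 * in dc_cursor_attributes, and fold negative screen coordinates into the
 * hotspot offsets before setting attributes and position on the stream.
 */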
1611 | static void dm_set_cursor( | |
1612 | struct amdgpu_crtc *amdgpu_crtc, | |
1613 | uint64_t gpu_addr, | |
1614 | uint32_t width, | |
1615 | uint32_t height) | |
1616 | { | |
1617 | struct dc_cursor_attributes attributes; | |
1618 | struct dc_cursor_position position; | |
1619 | struct drm_crtc *crtc = &amdgpu_crtc->base; | |
1620 | int x, y; | |
1621 | int xorigin = 0, yorigin = 0; | |
1622 | struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); | |
1623 | ||
1624 | amdgpu_crtc->cursor_width = width; | |
1625 | amdgpu_crtc->cursor_height = height; | |
1626 | ||
1627 | attributes.address.high_part = upper_32_bits(gpu_addr); | |
1628 | attributes.address.low_part = lower_32_bits(gpu_addr); | |
1629 | attributes.width = width; | |
1630 | attributes.height = height; | |
1631 | attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; | |
1632 | attributes.rotation_angle = 0; | |
1633 | attributes.attribute_flags.value = 0; | |
1634 | ||
1635 | attributes.pitch = attributes.width; | |
1636 | ||
1637 | x = amdgpu_crtc->cursor_x; | |
1638 | y = amdgpu_crtc->cursor_y; | |
1639 | ||
1640 | /* avivo cursors are offset into the total surface */ | |
1641 | x += crtc->primary->state->src_x >> 16; | |
1642 | y += crtc->primary->state->src_y >> 16; | |
1643 | ||
1644 | if (x < 0) { | |
1645 | xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); | |
1646 | x = 0; | |
1647 | } | |
1648 | if (y < 0) { | |
1649 | yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); | |
1650 | y = 0; | |
1651 | } | |
1652 | ||
1653 | position.enable = true; | |
1654 | position.x = x; | |
1655 | position.y = y; | |
1656 | ||
1657 | position.x_hotspot = xorigin; | |
1658 | position.y_hotspot = yorigin; | |
1659 | ||
1660 | if (!dc_stream_set_cursor_attributes( | |
1661 | acrtc_state->stream, | |
1662 | &attributes)) { | |
1663 | DRM_ERROR("DC failed to set cursor attributes\n"); | |
1664 | } | |
1665 | ||
1666 | if (!dc_stream_set_cursor_position( | |
1667 | acrtc_state->stream, | |
1668 | &position)) { | |
1669 | DRM_ERROR("DC failed to set cursor position\n"); | |
1670 | } | |
1671 | } | |
1672 | ||
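/*
 * Legacy cursor_set path: a zero address hides the cursor on the current
 * stream, dimensions larger than the DC cursor caps are rejected, and
 * otherwise the new cursor BO is programmed through dm_set_cursor().
 */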
1673 | static int dm_crtc_cursor_set( | |
1674 | struct drm_crtc *crtc, | |
1675 | uint64_t address, | |
1676 | uint32_t width, | |
1677 | uint32_t height) | |
1678 | { | |
1679 | struct dc_cursor_position position; | |
1680 | struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); | |
1681 | ||
1682 | int ret; | |
1683 | ||
1684 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | |
1685 | ret = -EINVAL; | |
1686 | ||
1687 | DRM_DEBUG_KMS( | |
1688 | "%s: crtc_id=%d with size %d to %d \n", | |
1689 | __func__, | |
1690 | amdgpu_crtc->crtc_id, | |
1691 | width, | |
1692 | height); | |
1693 | ||
1694 | if (!address) { | |
1695 | /* turn off cursor */ | |
1696 | position.enable = false; | |
1697 | position.x = 0; | |
1698 | position.y = 0; | |
1699 | ||
1700 | if (acrtc_state->stream) { | |
1701 | /*set cursor visible false*/ | |
1702 | dc_stream_set_cursor_position( | |
1703 | acrtc_state->stream, | |
1704 | &position); | |
1705 | } | |
1706 | ret = 0; | |
 | goto release; | |
1707 | ||
1708 | } | |
1709 | ||
1710 | if ((width > amdgpu_crtc->max_cursor_width) || | |
1711 | (height > amdgpu_crtc->max_cursor_height)) { | |
1712 | DRM_ERROR( | |
1713 | "%s: bad cursor width or height %d x %d\n", | |
1714 | __func__, | |
1715 | width, | |
1716 | height); | |
1717 | goto release; | |
1718 | } | |
1719 | ||
1720 | /* program the new cursor bo to hardware */ | |
1721 | dm_set_cursor(amdgpu_crtc, address, width, height); | |
 | ret = 0; | |
1722 | ||
1723 | release: | |
1724 | return ret; | |
1725 | ||
1726 | } | |
1727 | ||
1728 | static int dm_crtc_cursor_move(struct drm_crtc *crtc, | |
1729 | int x, int y) | |
1730 | { | |
1731 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | |
1732 | int xorigin = 0, yorigin = 0; | |
1733 | struct dc_cursor_position position; | |
1734 | struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); | |
1735 | ||
1736 | amdgpu_crtc->cursor_x = x; | |
1737 | amdgpu_crtc->cursor_y = y; | |
1738 | ||
1739 | /* avivo cursors are offset into the total surface */ | |
1740 | x += crtc->primary->state->src_x >> 16; | |
1741 | y += crtc->primary->state->src_y >> 16; | |
1742 | ||
1743 | /* | |
1744 | * TODO: for cursor debugging unguard the following | |
1745 | */ | |
1746 | #if 0 | |
1747 | DRM_DEBUG_KMS( | |
1748 | "%s: x %d y %d c->x %d c->y %d\n", | |
1749 | __func__, | |
1750 | x, | |
1751 | y, | |
1752 | crtc->x, | |
1753 | crtc->y); | |
1754 | #endif | |
1755 | ||
1756 | if (x < 0) { | |
1757 | xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); | |
1758 | x = 0; | |
1759 | } | |
1760 | if (y < 0) { | |
1761 | yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); | |
1762 | y = 0; | |
1763 | } | |
1764 | ||
1765 | position.enable = true; | |
1766 | position.x = x; | |
1767 | position.y = y; | |
1768 | ||
1769 | position.x_hotspot = xorigin; | |
1770 | position.y_hotspot = yorigin; | |
1771 | ||
1772 | if (acrtc_state->stream) { | |
1773 | if (!dc_stream_set_cursor_position( | |
1774 | acrtc_state->stream, | |
1775 | &position)) { | |
1776 | DRM_ERROR("DC failed to set cursor position\n"); | |
1777 | return -EINVAL; | |
1778 | } | |
1779 | } | |
1780 | ||
1781 | return 0; | |
1782 | } | |
1783 | ||
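/*
 * Translate DRM plane state into DC rectangles. The src_* values are 16.16
 * fixed point, so shifting right by 16 drops the fractional part; the
 * destination and clip rectangles come from the integer crtc_* fields, and
 * the DRM rotation bits map directly onto DC rotation angles.
 */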
1784 | static bool fill_rects_from_plane_state( | |
1785 | const struct drm_plane_state *state, | |
1786 | struct dc_surface *surface) | |
1787 | { | |
1788 | surface->src_rect.x = state->src_x >> 16; | |
1789 | surface->src_rect.y = state->src_y >> 16; | |
1790 | /* ignore the fractional part for now; we do not deal with sub-pixel positions */ | |
1791 | surface->src_rect.width = state->src_w >> 16; | |
1792 | ||
1793 | if (surface->src_rect.width == 0) | |
1794 | return false; | |
1795 | ||
1796 | surface->src_rect.height = state->src_h >> 16; | |
1797 | if (surface->src_rect.height == 0) | |
1798 | return false; | |
1799 | ||
1800 | surface->dst_rect.x = state->crtc_x; | |
1801 | surface->dst_rect.y = state->crtc_y; | |
1802 | ||
1803 | if (state->crtc_w == 0) | |
1804 | return false; | |
1805 | ||
1806 | surface->dst_rect.width = state->crtc_w; | |
1807 | ||
1808 | if (state->crtc_h == 0) | |
1809 | return false; | |
1810 | ||
1811 | surface->dst_rect.height = state->crtc_h; | |
1812 | ||
1813 | surface->clip_rect = surface->dst_rect; | |
1814 | ||
1815 | switch (state->rotation & DRM_MODE_ROTATE_MASK) { | |
1816 | case DRM_MODE_ROTATE_0: | |
1817 | surface->rotation = ROTATION_ANGLE_0; | |
1818 | break; | |
1819 | case DRM_MODE_ROTATE_90: | |
1820 | surface->rotation = ROTATION_ANGLE_90; | |
1821 | break; | |
1822 | case DRM_MODE_ROTATE_180: | |
1823 | surface->rotation = ROTATION_ANGLE_180; | |
1824 | break; | |
1825 | case DRM_MODE_ROTATE_270: | |
1826 | surface->rotation = ROTATION_ANGLE_270; | |
1827 | break; | |
1828 | default: | |
1829 | surface->rotation = ROTATION_ANGLE_0; | |
1830 | break; | |
1831 | } | |
1832 | ||
4562236b HW |
1833 | return true; |
1834 | } | |
e7b07cee HW |
1835 | static int get_fb_info( |
1836 | const struct amdgpu_framebuffer *amdgpu_fb, | |
1837 | uint64_t *tiling_flags, | |
1838 | uint64_t *fb_location) | |
1839 | { | |
1840 | struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); | |
1841 | int r = amdgpu_bo_reserve(rbo, false); | |
1842 | if (unlikely(r)) { | |
1843 | DRM_ERROR("Unable to reserve buffer\n"); | |
1844 | return r; | |
1845 | } | |
1846 | ||
1847 | if (fb_location) | |
1848 | *fb_location = amdgpu_bo_gpu_offset(rbo); | |
1849 | ||
1850 | if (tiling_flags) | |
1851 | amdgpu_bo_get_tiling_flags(rbo, tiling_flags); | |
1852 | ||
1853 | amdgpu_bo_unreserve(rbo); | |
1854 | ||
1855 | return r; | |
1856 | } | |
1857 | ||
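/*
 * Build a dc_surface from an amdgpu framebuffer: map the DRM fourcc to a DC
 * surface format, fill either the packed graphics layout or the semi-planar
 * video layout (chroma plane following a 64-pixel aligned luma plane), and
 * translate the BO tiling flags into GFX8 or GFX9 tiling info by ASIC.
 */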
1858 | static int fill_plane_attributes_from_fb( | |
1859 | struct amdgpu_device *adev, | |
1860 | struct dc_surface *surface, | |
1861 | const struct amdgpu_framebuffer *amdgpu_fb, bool addReq) | |
1862 | { | |
1863 | uint64_t tiling_flags; | |
1864 | uint64_t fb_location = 0; | |
1865 | unsigned int awidth; | |
1866 | const struct drm_framebuffer *fb = &amdgpu_fb->base; | |
1867 | int ret = 0; | |
1868 | struct drm_format_name_buf format_name; | |
1869 | ||
1870 | ret = get_fb_info( | |
1871 | amdgpu_fb, | |
1872 | &tiling_flags, | |
1873 | addReq ? &fb_location : NULL); | |
1874 | ||
1875 | if (ret) | |
1876 | return ret; | |
1877 | ||
1878 | switch (fb->format->format) { | |
1879 | case DRM_FORMAT_C8: | |
1880 | surface->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; | |
1881 | break; | |
1882 | case DRM_FORMAT_RGB565: | |
1883 | surface->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; | |
1884 | break; | |
1885 | case DRM_FORMAT_XRGB8888: | |
1886 | case DRM_FORMAT_ARGB8888: | |
1887 | surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; | |
1888 | break; | |
1889 | case DRM_FORMAT_XRGB2101010: | |
1890 | case DRM_FORMAT_ARGB2101010: | |
1891 | surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; | |
1892 | break; | |
1893 | case DRM_FORMAT_XBGR2101010: | |
1894 | case DRM_FORMAT_ABGR2101010: | |
1895 | surface->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; | |
1896 | break; | |
1897 | case DRM_FORMAT_NV21: | |
1898 | surface->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; | |
1899 | break; | |
1900 | case DRM_FORMAT_NV12: | |
1901 | surface->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; | |
1902 | break; | |
1903 | default: | |
1904 | DRM_ERROR("Unsupported screen format %s\n", | |
1905 | drm_get_format_name(fb->format->format, &format_name)); | |
1906 | return -EINVAL; | |
1907 | } | |
1908 | ||
1909 | if (surface->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { | |
1910 | surface->address.type = PLN_ADDR_TYPE_GRAPHICS; | |
1911 | surface->address.grph.addr.low_part = lower_32_bits(fb_location); | |
1912 | surface->address.grph.addr.high_part = upper_32_bits(fb_location); | |
1913 | surface->plane_size.grph.surface_size.x = 0; | |
1914 | surface->plane_size.grph.surface_size.y = 0; | |
1915 | surface->plane_size.grph.surface_size.width = fb->width; | |
1916 | surface->plane_size.grph.surface_size.height = fb->height; | |
1917 | surface->plane_size.grph.surface_pitch = | |
1918 | fb->pitches[0] / fb->format->cpp[0]; | |
1919 | /* TODO: unhardcode */ | |
1920 | surface->color_space = COLOR_SPACE_SRGB; | |
1921 | ||
1922 | } else { | |
1923 | awidth = ALIGN(fb->width, 64); | |
1924 | surface->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; | |
1925 | surface->address.video_progressive.luma_addr.low_part | |
1926 | = lower_32_bits(fb_location); | |
1927 | surface->address.video_progressive.chroma_addr.low_part | |
1928 | = lower_32_bits(fb_location) + | |
1929 | (awidth * fb->height); | |
1930 | surface->plane_size.video.luma_size.x = 0; | |
1931 | surface->plane_size.video.luma_size.y = 0; | |
1932 | surface->plane_size.video.luma_size.width = awidth; | |
1933 | surface->plane_size.video.luma_size.height = fb->height; | |
1934 | /* TODO: unhardcode */ | |
1935 | surface->plane_size.video.luma_pitch = awidth; | |
1936 | ||
1937 | surface->plane_size.video.chroma_size.x = 0; | |
1938 | surface->plane_size.video.chroma_size.y = 0; | |
1939 | surface->plane_size.video.chroma_size.width = awidth; | |
1940 | surface->plane_size.video.chroma_size.height = fb->height; | |
1941 | surface->plane_size.video.chroma_pitch = awidth / 2; | |
1942 | ||
1943 | /* TODO: unhardcode */ | |
1944 | surface->color_space = COLOR_SPACE_YCBCR709; | |
1945 | } | |
1946 | ||
1947 | memset(&surface->tiling_info, 0, sizeof(surface->tiling_info)); | |
1948 | ||
1949 | /* Fill GFX params */ | |
1950 | if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) | |
1951 | { | |
1952 | unsigned bankw, bankh, mtaspect, tile_split, num_banks; | |
1953 | ||
1954 | bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); | |
1955 | bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); | |
1956 | mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); | |
1957 | tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); | |
1958 | num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); | |
1959 | ||
1960 | /* XXX fix me for VI */ | |
1961 | surface->tiling_info.gfx8.num_banks = num_banks; | |
1962 | surface->tiling_info.gfx8.array_mode = | |
1963 | DC_ARRAY_2D_TILED_THIN1; | |
1964 | surface->tiling_info.gfx8.tile_split = tile_split; | |
1965 | surface->tiling_info.gfx8.bank_width = bankw; | |
1966 | surface->tiling_info.gfx8.bank_height = bankh; | |
1967 | surface->tiling_info.gfx8.tile_aspect = mtaspect; | |
1968 | surface->tiling_info.gfx8.tile_mode = | |
1969 | DC_ADDR_SURF_MICRO_TILING_DISPLAY; | |
1970 | } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) | |
1971 | == DC_ARRAY_1D_TILED_THIN1) { | |
1972 | surface->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; | |
1973 | } | |
1974 | ||
1975 | surface->tiling_info.gfx8.pipe_config = | |
1976 | AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); | |
1977 | ||
1978 | if (adev->asic_type == CHIP_VEGA10 || | |
1979 | adev->asic_type == CHIP_RAVEN) { | |
1980 | /* Fill GFX9 params */ | |
1981 | surface->tiling_info.gfx9.num_pipes = | |
1982 | adev->gfx.config.gb_addr_config_fields.num_pipes; | |
1983 | surface->tiling_info.gfx9.num_banks = | |
1984 | adev->gfx.config.gb_addr_config_fields.num_banks; | |
1985 | surface->tiling_info.gfx9.pipe_interleave = | |
1986 | adev->gfx.config.gb_addr_config_fields.pipe_interleave_size; | |
1987 | surface->tiling_info.gfx9.num_shader_engines = | |
1988 | adev->gfx.config.gb_addr_config_fields.num_se; | |
1989 | surface->tiling_info.gfx9.max_compressed_frags = | |
1990 | adev->gfx.config.gb_addr_config_fields.max_compress_frags; | |
1991 | surface->tiling_info.gfx9.num_rb_per_se = | |
1992 | adev->gfx.config.gb_addr_config_fields.num_rb_per_se; | |
1993 | surface->tiling_info.gfx9.swizzle = | |
1994 | AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE); | |
1995 | surface->tiling_info.gfx9.shaderEnable = 1; | |
1996 | } | |
1997 | ||
1998 | surface->visible = true; | |
1999 | surface->scaling_quality.h_taps_c = 0; | |
2000 | surface->scaling_quality.v_taps_c = 0; | |
2001 | ||
2002 | /* is this needed? is surface zeroed at allocation? */ | |
2003 | surface->scaling_quality.h_taps = 0; | |
2004 | surface->scaling_quality.v_taps = 0; | |
2005 | surface->stereo_format = PLANE_STEREO_FORMAT_NONE; | |
2006 | ||
2007 | return ret; | |
2008 | ||
2009 | } | |
2010 | ||
2011 | #define NUM_OF_RAW_GAMMA_RAMP_RGB_256 256 | |
2012 | ||
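/*
 * Copy the 256-entry legacy gamma LUT from the CRTC state into a newly
 * allocated dc_gamma and attach it to the surface as its gamma correction;
 * the caller only invokes this when crtc_state->gamma_lut is set.
 */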
2013 | static void fill_gamma_from_crtc_state( | |
2014 | const struct drm_crtc_state *crtc_state, | |
2015 | struct dc_surface *dc_surface) | |
2016 | { | |
2017 | int i; | |
2018 | struct dc_gamma *gamma; | |
2019 | struct drm_color_lut *lut = (struct drm_color_lut *) crtc_state->gamma_lut->data; | |
2020 | ||
2021 | gamma = dc_create_gamma(); | |
2022 | ||
2023 | if (gamma == NULL) { | |
2024 | WARN_ON(1); | |
2025 | return; | |
2026 | } | |
2027 | ||
2028 | for (i = 0; i < NUM_OF_RAW_GAMMA_RAMP_RGB_256; i++) { | |
2029 | gamma->red[i] = lut[i].red; | |
2030 | gamma->green[i] = lut[i].green; | |
2031 | gamma->blue[i] = lut[i].blue; | |
2032 | } | |
2033 | ||
2034 | dc_surface->gamma_correction = gamma; | |
2035 | } | |
2036 | ||
2037 | static int fill_plane_attributes( | |
2038 | struct amdgpu_device *adev, | |
2039 | struct dc_surface *surface, | |
2040 | struct drm_plane_state *plane_state, | |
2041 | struct drm_crtc_state *crtc_state, | |
2042 | bool addrReq) | |
2043 | { | |
2044 | const struct amdgpu_framebuffer *amdgpu_fb = | |
2045 | to_amdgpu_framebuffer(plane_state->fb); | |
2046 | const struct drm_crtc *crtc = plane_state->crtc; | |
2047 | struct dc_transfer_func *input_tf; | |
2048 | int ret = 0; | |
2049 | ||
2050 | if (!fill_rects_from_plane_state(plane_state, surface)) | |
2051 | return -EINVAL; | |
2052 | ||
2053 | ret = fill_plane_attributes_from_fb( | |
2054 | crtc->dev->dev_private, | |
2055 | surface, | |
2056 | amdgpu_fb, | |
2057 | addrReq); | |
2058 | ||
2059 | if (ret) | |
2060 | return ret; | |
2061 | ||
2062 | input_tf = dc_create_transfer_func(); | |
2063 | ||
2064 | if (input_tf == NULL) | |
2065 | return -ENOMEM; | |
2066 | ||
2067 | input_tf->type = TF_TYPE_PREDEFINED; | |
2068 | input_tf->tf = TRANSFER_FUNCTION_SRGB; | |
2069 | ||
2070 | surface->in_transfer_func = input_tf; | |
2071 | ||
2072 | /* In case of gamma set, update gamma value */ | |
2073 | if (crtc_state->gamma_lut) | |
2074 | fill_gamma_from_crtc_state(crtc_state, surface); | |
2075 | ||
2076 | return ret; | |
2077 | } | |
2078 | ||
2079 | /*****************************************************************************/ | |
2080 | ||
2081 | struct amdgpu_connector *aconnector_from_drm_crtc_id( | |
2082 | const struct drm_crtc *crtc) | |
2083 | { | |
2084 | struct drm_device *dev = crtc->dev; | |
2085 | struct drm_connector *connector; | |
2086 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); | |
2087 | struct amdgpu_connector *aconnector; | |
2088 | ||
2089 | list_for_each_entry(connector, | |
2090 | &dev->mode_config.connector_list, head) { | |
2091 | ||
2092 | aconnector = to_amdgpu_connector(connector); | |
2093 | ||
2094 | if (aconnector->base.state->crtc != &acrtc->base) | |
2095 | continue; | |
2096 | ||
2097 | /* Found the connector */ | |
2098 | return aconnector; | |
2099 | } | |
2100 | ||
2101 | /* If we get here, not found. */ | |
2102 | return NULL; | |
2103 | } | |
2104 | ||
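/*
 * Derive the stream src (viewport) and dst (addressable) rectangles from
 * the requested mode and the connector scaling property: RMX_ASPECT and
 * RMX_OFF shrink one destination dimension to preserve the source aspect
 * ratio, RMX_CENTER copies the source size, the result is centered, and
 * any underscan borders are subtracted from the destination.
 */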
2105 | static void update_stream_scaling_settings( | |
2106 | const struct drm_display_mode *mode, | |
2107 | const struct dm_connector_state *dm_state, | |
2108 | struct dc_stream *stream) | |
2109 | { | |
2110 | enum amdgpu_rmx_type rmx_type; | |
2111 | ||
2112 | struct rect src = { 0 }; /* viewport in composition space*/ | |
2113 | struct rect dst = { 0 }; /* stream addressable area */ | |
2114 | ||
2115 | /* no mode. nothing to be done */ | |
2116 | if (!mode) | |
2117 | return; | |
2118 | ||
2119 | /* Full screen scaling by default */ | |
2120 | src.width = mode->hdisplay; | |
2121 | src.height = mode->vdisplay; | |
2122 | dst.width = stream->timing.h_addressable; | |
2123 | dst.height = stream->timing.v_addressable; | |
2124 | ||
2125 | rmx_type = dm_state->scaling; | |
2126 | if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { | |
2127 | if (src.width * dst.height < | |
2128 | src.height * dst.width) { | |
2129 | /* height needs less upscaling/more downscaling */ | |
2130 | dst.width = src.width * | |
2131 | dst.height / src.height; | |
2132 | } else { | |
2133 | /* width needs less upscaling/more downscaling */ | |
2134 | dst.height = src.height * | |
2135 | dst.width / src.width; | |
2136 | } | |
2137 | } else if (rmx_type == RMX_CENTER) { | |
2138 | dst = src; | |
2139 | } | |
2140 | ||
2141 | dst.x = (stream->timing.h_addressable - dst.width) / 2; | |
2142 | dst.y = (stream->timing.v_addressable - dst.height) / 2; | |
2143 | ||
2144 | if (dm_state->underscan_enable) { | |
2145 | dst.x += dm_state->underscan_hborder / 2; | |
2146 | dst.y += dm_state->underscan_vborder / 2; | |
2147 | dst.width -= dm_state->underscan_hborder; | |
2148 | dst.height -= dm_state->underscan_vborder; | |
2149 | } | |
2150 | ||
2151 | stream->src = src; | |
2152 | stream->dst = dst; | |
2153 | ||
2154 | DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n", | |
2155 | dst.x, dst.y, dst.width, dst.height); | |
2156 | ||
2157 | } | |
2158 | ||
2159 | static enum dc_color_depth convert_color_depth_from_display_info( | |
2160 | const struct drm_connector *connector) | |
2161 | { | |
2162 | uint32_t bpc = connector->display_info.bpc; | |
2163 | ||
2164 | /* Limit color depth to 8 bpc for now. | |
2165 | * TODO: Still need to handle deep color */ | |
2166 | if (bpc > 8) | |
2167 | bpc = 8; | |
2168 | ||
2169 | switch (bpc) { | |
2170 | case 0: | |
2171 | /* Temporary workaround: DRM doesn't parse color depth for | |
2172 | * EDID revisions before 1.4 | |
2173 | * TODO: Fix edid parsing | |
2174 | */ | |
2175 | return COLOR_DEPTH_888; | |
2176 | case 6: | |
2177 | return COLOR_DEPTH_666; | |
2178 | case 8: | |
2179 | return COLOR_DEPTH_888; | |
2180 | case 10: | |
2181 | return COLOR_DEPTH_101010; | |
2182 | case 12: | |
2183 | return COLOR_DEPTH_121212; | |
2184 | case 14: | |
2185 | return COLOR_DEPTH_141414; | |
2186 | case 16: | |
2187 | return COLOR_DEPTH_161616; | |
2188 | default: | |
2189 | return COLOR_DEPTH_UNDEFINED; | |
2190 | } | |
2191 | } | |
2192 | ||
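/*
 * Classify the mode as 16:9 or 4:3 by cross-multiplying: an exact 16:9 mode
 * has crtc_hdisplay * 9 == crtc_vdisplay * 16, so a difference within +/-10
 * is treated as 16:9 and anything else falls back to 4:3.
 */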
2193 | static enum dc_aspect_ratio get_aspect_ratio( | |
2194 | const struct drm_display_mode *mode_in) | |
2195 | { | |
2196 | int32_t width = mode_in->crtc_hdisplay * 9; | |
2197 | int32_t height = mode_in->crtc_vdisplay * 16; | |
2198 | if ((width - height) < 10 && (width - height) > -10) | |
2199 | return ASPECT_RATIO_16_9; | |
2200 | else | |
2201 | return ASPECT_RATIO_4_3; | |
2202 | } | |
2203 | ||
2204 | static enum dc_color_space get_output_color_space( | |
2205 | const struct dc_crtc_timing *dc_crtc_timing) | |
2206 | { | |
2207 | enum dc_color_space color_space = COLOR_SPACE_SRGB; | |
2208 | ||
2209 | switch (dc_crtc_timing->pixel_encoding) { | |
2210 | case PIXEL_ENCODING_YCBCR422: | |
2211 | case PIXEL_ENCODING_YCBCR444: | |
2212 | case PIXEL_ENCODING_YCBCR420: | |
2213 | { | |
2214 | /* | |
2215 | * A pixel clock above 27030 kHz is the separation point between | |
2216 | * HDTV and SDTV according to the HDMI spec, so we use YCbCr709 | |
2217 | * above it and YCbCr601 below it, respectively | |
2218 | */ | |
2219 | if (dc_crtc_timing->pix_clk_khz > 27030) { | |
2220 | if (dc_crtc_timing->flags.Y_ONLY) | |
2221 | color_space = | |
2222 | COLOR_SPACE_YCBCR709_LIMITED; | |
2223 | else | |
2224 | color_space = COLOR_SPACE_YCBCR709; | |
2225 | } else { | |
2226 | if (dc_crtc_timing->flags.Y_ONLY) | |
2227 | color_space = | |
2228 | COLOR_SPACE_YCBCR601_LIMITED; | |
2229 | else | |
2230 | color_space = COLOR_SPACE_YCBCR601; | |
2231 | } | |
2232 | ||
2233 | } | |
2234 | break; | |
2235 | case PIXEL_ENCODING_RGB: | |
2236 | color_space = COLOR_SPACE_SRGB; | |
2237 | break; | |
2238 | ||
2239 | default: | |
2240 | WARN_ON(1); | |
2241 | break; | |
2242 | } | |
2243 | ||
2244 | return color_space; | |
2245 | } | |
2246 | ||
2247 | /*****************************************************************************/ | |
2248 | ||
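/*
 * Convert a drm_display_mode into the stream's dc_crtc_timing: YCbCr444 is
 * chosen only for HDMI sinks that advertise it, front porch and sync width
 * are derived from the crtc_* timing fields, the CEA VIC is looked up from
 * the mode, and an sRGB output transfer function is attached to the stream.
 */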
2249 | static void fill_stream_properties_from_drm_display_mode( | |
2250 | struct dc_stream *stream, | |
2251 | const struct drm_display_mode *mode_in, | |
2252 | const struct drm_connector *connector) | |
2253 | { | |
2254 | struct dc_crtc_timing *timing_out = &stream->timing; | |
2255 | memset(timing_out, 0, sizeof(struct dc_crtc_timing)); | |
2256 | ||
2257 | timing_out->h_border_left = 0; | |
2258 | timing_out->h_border_right = 0; | |
2259 | timing_out->v_border_top = 0; | |
2260 | timing_out->v_border_bottom = 0; | |
2261 | /* TODO: un-hardcode */ | |
2262 | ||
2263 | if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) | |
2264 | && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) | |
2265 | timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; | |
2266 | else | |
2267 | timing_out->pixel_encoding = PIXEL_ENCODING_RGB; | |
2268 | ||
2269 | timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; | |
2270 | timing_out->display_color_depth = convert_color_depth_from_display_info( | |
2271 | connector); | |
2272 | timing_out->scan_type = SCANNING_TYPE_NODATA; | |
2273 | timing_out->hdmi_vic = 0; | |
2274 | timing_out->vic = drm_match_cea_mode(mode_in); | |
2275 | ||
2276 | timing_out->h_addressable = mode_in->crtc_hdisplay; | |
2277 | timing_out->h_total = mode_in->crtc_htotal; | |
2278 | timing_out->h_sync_width = | |
2279 | mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; | |
2280 | timing_out->h_front_porch = | |
2281 | mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; | |
2282 | timing_out->v_total = mode_in->crtc_vtotal; | |
2283 | timing_out->v_addressable = mode_in->crtc_vdisplay; | |
2284 | timing_out->v_front_porch = | |
2285 | mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; | |
2286 | timing_out->v_sync_width = | |
2287 | mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; | |
2288 | timing_out->pix_clk_khz = mode_in->crtc_clock; | |
2289 | timing_out->aspect_ratio = get_aspect_ratio(mode_in); | |
2290 | if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) | |
2291 | timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; | |
2292 | if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) | |
2293 | timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; | |
2294 | ||
2295 | stream->output_color_space = get_output_color_space(timing_out); | |
2296 | ||
2297 | { | |
2298 | struct dc_transfer_func *tf = dc_create_transfer_func(); | |
 | if (WARN_ON(!tf)) | |
 | return; | |
2299 | tf->type = TF_TYPE_PREDEFINED; | |
2300 | tf->tf = TRANSFER_FUNCTION_SRGB; | |
2301 | stream->out_transfer_func = tf; | |
2302 | } | |
2303 | } | |
2304 | ||
2305 | static void fill_audio_info( | |
2306 | struct audio_info *audio_info, | |
2307 | const struct drm_connector *drm_connector, | |
2308 | const struct dc_sink *dc_sink) | |
2309 | { | |
2310 | int i = 0; | |
2311 | int cea_revision = 0; | |
2312 | const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; | |
2313 | ||
2314 | audio_info->manufacture_id = edid_caps->manufacturer_id; | |
2315 | audio_info->product_id = edid_caps->product_id; | |
2316 | ||
2317 | cea_revision = drm_connector->display_info.cea_rev; | |
2318 | ||
2319 | while (i < AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS && | |
2320 | edid_caps->display_name[i]) { | |
2321 | audio_info->display_name[i] = edid_caps->display_name[i]; | |
2322 | i++; | |
2323 | } | |
2324 | ||
2325 | if (cea_revision >= 3) { | |
2326 | audio_info->mode_count = edid_caps->audio_mode_count; | |
2327 | ||
2328 | for (i = 0; i < audio_info->mode_count; ++i) { | |
2329 | audio_info->modes[i].format_code = | |
2330 | (enum audio_format_code) | |
2331 | (edid_caps->audio_modes[i].format_code); | |
2332 | audio_info->modes[i].channel_count = | |
2333 | edid_caps->audio_modes[i].channel_count; | |
2334 | audio_info->modes[i].sample_rates.all = | |
2335 | edid_caps->audio_modes[i].sample_rate; | |
2336 | audio_info->modes[i].sample_size = | |
2337 | edid_caps->audio_modes[i].sample_size; | |
2338 | } | |
2339 | } | |
2340 | ||
2341 | audio_info->flags.all = edid_caps->speaker_flags; | |
2342 | ||
2343 | /* TODO: We only check for progressive mode; check for interlaced mode too */ | |
2344 | if (drm_connector->latency_present[0]) { | |
2345 | audio_info->video_latency = drm_connector->video_latency[0]; | |
2346 | audio_info->audio_latency = drm_connector->audio_latency[0]; | |
2347 | } | |
2348 | ||
2349 | /* TODO: For DP, video and audio latency should be calculated from DPCD caps */ | |
2350 | ||
2351 | } | |
2352 | ||
2353 | static void copy_crtc_timing_for_drm_display_mode( | |
2354 | const struct drm_display_mode *src_mode, | |
2355 | struct drm_display_mode *dst_mode) | |
2356 | { | |
2357 | dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay; | |
2358 | dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay; | |
2359 | dst_mode->crtc_clock = src_mode->crtc_clock; | |
2360 | dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start; | |
2361 | dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end; | |
2362 | dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start; | |
2363 | dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end; | |
2364 | dst_mode->crtc_htotal = src_mode->crtc_htotal; | |
2365 | dst_mode->crtc_hskew = src_mode->crtc_hskew; | |
2366 | dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start; | |
2367 | dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end; | |
2368 | dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start; | |
2369 | dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end; | |
2370 | dst_mode->crtc_vtotal = src_mode->crtc_vtotal; | |
2371 | } | |
2372 | ||
2373 | static void decide_crtc_timing_for_drm_display_mode( | |
2374 | struct drm_display_mode *drm_mode, | |
2375 | const struct drm_display_mode *native_mode, | |
2376 | bool scale_enabled) | |
2377 | { | |
2378 | if (scale_enabled) { | |
2379 | copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); | |
2380 | } else if (native_mode->clock == drm_mode->clock && | |
2381 | native_mode->htotal == drm_mode->htotal && | |
2382 | native_mode->vtotal == drm_mode->vtotal) { | |
2383 | copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); | |
2384 | } else { | |
2385 | /* neither scaling nor an amdgpu-inserted mode; nothing to patch */ | |
2386 | } | |
2387 | } | |
2388 | ||
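/*
 * Create a dc_stream for the connector's current sink: the preferred
 * (native) mode, when found, decides whether the requested mode's CRTC
 * timing is replaced for scaled modes, then timing, scaling and audio
 * properties are filled in from the DRM and connector state.
 */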
2389 | static struct dc_stream *create_stream_for_sink( | |
2390 | struct amdgpu_connector *aconnector, | |
2391 | const struct drm_display_mode *drm_mode, | |
2392 | const struct dm_connector_state *dm_state) | |
2393 | { | |
2394 | struct drm_display_mode *preferred_mode = NULL; | |
2395 | const struct drm_connector *drm_connector; | |
2396 | struct dc_stream *stream = NULL; | |
2397 | struct drm_display_mode mode = *drm_mode; | |
2398 | bool native_mode_found = false; | |
2399 | ||
2400 | if (NULL == aconnector) { | |
2401 | DRM_ERROR("aconnector is NULL!\n"); | |
2402 | goto drm_connector_null; | |
2403 | } | |
2404 | ||
2405 | if (NULL == dm_state) { | |
2406 | DRM_ERROR("dm_state is NULL!\n"); | |
2407 | goto dm_state_null; | |
2408 | } | |
4562236b | 2409 | |
e7b07cee HW |
2410 | drm_connector = &aconnector->base; |
2411 | stream = dc_create_stream_for_sink(aconnector->dc_sink); | |
4562236b | 2412 | |
e7b07cee HW |
2413 | if (NULL == stream) { |
2414 | DRM_ERROR("Failed to create stream for sink!\n"); | |
2415 | goto stream_create_fail; | |
2416 | } | |
2417 | ||
2418 | list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { | |
2419 | /* Search for preferred mode */ | |
2420 | if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { | |
2421 | native_mode_found = true; | |
2422 | break; | |
2423 | } | |
2424 | } | |
2425 | if (!native_mode_found) | |
2426 | preferred_mode = list_first_entry_or_null( | |
2427 | &aconnector->base.modes, | |
2428 | struct drm_display_mode, | |
2429 | head); | |
2430 | ||
2431 | if (NULL == preferred_mode) { | |
2432 | /* This may not be an error: the use case is when we have no | |
2433 | * usermode calls to reset and set the mode upon hotplug. In this | |
2434 | * case we call set mode ourselves to restore the previous mode, | |
2435 | * and the mode list may not be filled in yet. | |
2436 | */ | |
2437 | DRM_INFO("No preferred mode found\n"); | |
2438 | } else { | |
2439 | decide_crtc_timing_for_drm_display_mode( | |
2440 | &mode, preferred_mode, | |
2441 | dm_state->scaling != RMX_OFF); | |
2442 | } | |
2443 | ||
2444 | fill_stream_properties_from_drm_display_mode(stream, | |
2445 | &mode, &aconnector->base); | |
2446 | update_stream_scaling_settings(&mode, dm_state, stream); | |
2447 | ||
2448 | fill_audio_info( | |
2449 | &stream->audio_info, | |
2450 | drm_connector, | |
2451 | aconnector->dc_sink); | |
2452 | ||
2453 | stream_create_fail: | |
2454 | dm_state_null: | |
2455 | drm_connector_null: | |
2456 | return stream; | |
2457 | } | |
2458 | ||
2459 | void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc) | |
2460 | { | |
2461 | drm_crtc_cleanup(crtc); | |
2462 | kfree(crtc); | |
2463 | } | |
2464 | ||
2465 | static void dm_crtc_destroy_state(struct drm_crtc *crtc, | |
2466 | struct drm_crtc_state *state) | |
2467 | { | |
2468 | struct dm_crtc_state *cur = to_dm_crtc_state(state); | |
2469 | ||
2470 | /* TODO: Destroy dc_stream objects once the stream object is flattened */ | |
2471 | if (cur->stream) | |
2472 | dc_stream_release(cur->stream); | |
2473 | ||
2474 | ||
2475 | __drm_atomic_helper_crtc_destroy_state(state); | |
2476 | ||
2477 | ||
2478 | kfree(state); | |
2479 | } | |
2480 | ||
2481 | static void dm_crtc_reset_state(struct drm_crtc *crtc) | |
2482 | { | |
2483 | struct dm_crtc_state *state; | |
2484 | ||
2485 | if (crtc->state) | |
2486 | dm_crtc_destroy_state(crtc, crtc->state); | |
2487 | ||
2488 | state = kzalloc(sizeof(*state), GFP_KERNEL); | |
2489 | if (WARN_ON(!state)) | |
2490 | return; | |
2491 | ||
2492 | crtc->state = &state->base; | |
2493 | crtc->state->crtc = crtc; | |
2494 | ||
2495 | } | |
2496 | ||
2497 | static struct drm_crtc_state * | |
2498 | dm_crtc_duplicate_state(struct drm_crtc *crtc) | |
2499 | { | |
2500 | struct dm_crtc_state *state, *cur; | |
2501 | ||
2502 | if (WARN_ON(!crtc->state)) | |
2503 | return NULL; | |
2504 | ||
2505 | cur = to_dm_crtc_state(crtc->state); | |
2506 | ||
2507 | state = dm_alloc(sizeof(*state)); | |
 | if (!state) | |
 | return NULL; | |
2508 | ||
2509 | __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); | |
2510 | ||
2511 | if (cur->stream) { | |
2512 | state->stream = cur->stream; | |
2513 | dc_stream_retain(state->stream); | |
2514 | } | |
2515 | ||
2516 | /* TODO: Duplicate dc_stream objects once the stream object is flattened */ | |
2517 | ||
2518 | return &state->base; | |
2519 | } | |
2520 | ||
2521 | /* Implement only the options currently available for the driver */ | |
2522 | static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { | |
2523 | .reset = dm_crtc_reset_state, | |
2524 | .destroy = amdgpu_dm_crtc_destroy, | |
2525 | .gamma_set = drm_atomic_helper_legacy_gamma_set, | |
2526 | .set_config = drm_atomic_helper_set_config, | |
2527 | .page_flip = drm_atomic_helper_page_flip, | |
2528 | .atomic_duplicate_state = dm_crtc_duplicate_state, | |
2529 | .atomic_destroy_state = dm_crtc_destroy_state, | |
2530 | }; | |
2531 | ||
2532 | static enum drm_connector_status | |
2533 | amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) | |
2534 | { | |
2535 | bool connected; | |
2536 | struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); | |
2537 | ||
2538 | /* Notes: | |
2539 | * 1. This interface is NOT called in context of HPD irq. | |
2540 | * 2. This interface *is called* in context of a user-mode ioctl, which | |
2541 | * makes it a bad place for *any* MST-related activity. */ | |
2542 | ||
2543 | if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) | |
2544 | connected = (aconnector->dc_sink != NULL); | |
2545 | else | |
2546 | connected = (aconnector->base.force == DRM_FORCE_ON); | |
2547 | ||
2548 | return (connected ? connector_status_connected : | |
2549 | connector_status_disconnected); | |
2550 | } | |
2551 | ||
2552 | int amdgpu_dm_connector_atomic_set_property( | |
2553 | struct drm_connector *connector, | |
2554 | struct drm_connector_state *connector_state, | |
2555 | struct drm_property *property, | |
2556 | uint64_t val) | |
2557 | { | |
2558 | struct drm_device *dev = connector->dev; | |
2559 | struct amdgpu_device *adev = dev->dev_private; | |
2560 | struct dm_connector_state *dm_old_state = | |
2561 | to_dm_connector_state(connector->state); | |
2562 | struct dm_connector_state *dm_new_state = | |
2563 | to_dm_connector_state(connector_state); | |
2564 | ||
2565 | int ret = -EINVAL; | |
2566 | ||
2567 | if (property == dev->mode_config.scaling_mode_property) { | |
2568 | enum amdgpu_rmx_type rmx_type; | |
2569 | ||
2570 | switch (val) { | |
2571 | case DRM_MODE_SCALE_CENTER: | |
2572 | rmx_type = RMX_CENTER; | |
2573 | break; | |
2574 | case DRM_MODE_SCALE_ASPECT: | |
2575 | rmx_type = RMX_ASPECT; | |
2576 | break; | |
2577 | case DRM_MODE_SCALE_FULLSCREEN: | |
2578 | rmx_type = RMX_FULL; | |
2579 | break; | |
2580 | case DRM_MODE_SCALE_NONE: | |
2581 | default: | |
2582 | rmx_type = RMX_OFF; | |
2583 | break; | |
2584 | } | |
2585 | ||
2586 | if (dm_old_state->scaling == rmx_type) | |
2587 | return 0; | |
2588 | ||
2589 | dm_new_state->scaling = rmx_type; | |
2590 | ret = 0; | |
2591 | } else if (property == adev->mode_info.underscan_hborder_property) { | |
2592 | dm_new_state->underscan_hborder = val; | |
2593 | ret = 0; | |
2594 | } else if (property == adev->mode_info.underscan_vborder_property) { | |
2595 | dm_new_state->underscan_vborder = val; | |
2596 | ret = 0; | |
2597 | } else if (property == adev->mode_info.underscan_property) { | |
2598 | dm_new_state->underscan_enable = val; | |
2599 | ret = 0; | |
2600 | } | |
2601 | ||
2602 | return ret; | |
2603 | } | |
2604 | ||
2605 | int amdgpu_dm_connector_atomic_get_property( | |
2606 | struct drm_connector *connector, | |
2607 | const struct drm_connector_state *state, | |
2608 | struct drm_property *property, | |
2609 | uint64_t *val) | |
2610 | { | |
2611 | struct drm_device *dev = connector->dev; | |
2612 | struct amdgpu_device *adev = dev->dev_private; | |
2613 | struct dm_connector_state *dm_state = | |
2614 | to_dm_connector_state(state); | |
2615 | int ret = -EINVAL; | |
2616 | ||
2617 | if (property == dev->mode_config.scaling_mode_property) { | |
2618 | switch (dm_state->scaling) { | |
2619 | case RMX_CENTER: | |
2620 | *val = DRM_MODE_SCALE_CENTER; | |
2621 | break; | |
2622 | case RMX_ASPECT: | |
2623 | *val = DRM_MODE_SCALE_ASPECT; | |
2624 | break; | |
2625 | case RMX_FULL: | |
2626 | *val = DRM_MODE_SCALE_FULLSCREEN; | |
2627 | break; | |
2628 | case RMX_OFF: | |
2629 | default: | |
2630 | *val = DRM_MODE_SCALE_NONE; | |
2631 | break; | |
2632 | } | |
2633 | ret = 0; | |
2634 | } else if (property == adev->mode_info.underscan_hborder_property) { | |
2635 | *val = dm_state->underscan_hborder; | |
2636 | ret = 0; | |
2637 | } else if (property == adev->mode_info.underscan_vborder_property) { | |
2638 | *val = dm_state->underscan_vborder; | |
2639 | ret = 0; | |
2640 | } else if (property == adev->mode_info.underscan_property) { | |
2641 | *val = dm_state->underscan_enable; | |
2642 | ret = 0; | |
2643 | } | |
2644 | return ret; | |
2645 | } | |
2646 | ||
2647 | void amdgpu_dm_connector_destroy(struct drm_connector *connector) | |
2648 | { | |
2649 | struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); | |
2650 | const struct dc_link *link = aconnector->dc_link; | |
2651 | struct amdgpu_device *adev = connector->dev->dev_private; | |
2652 | struct amdgpu_display_manager *dm = &adev->dm; | |
2653 | #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ | |
2654 | defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) | |
2655 | ||
2656 | if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) { | |
2657 | amdgpu_dm_register_backlight_device(dm); | |
2658 | ||
2659 | if (dm->backlight_dev) { | |
2660 | backlight_device_unregister(dm->backlight_dev); | |
2661 | dm->backlight_dev = NULL; | |
2662 | } | |
2663 | ||
2664 | } | |
2665 | #endif | |
2666 | drm_connector_unregister(connector); | |
2667 | drm_connector_cleanup(connector); | |
2668 | kfree(connector); | |
2669 | } | |
2670 | ||
2671 | void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) | |
2672 | { | |
2673 | struct dm_connector_state *state = | |
2674 | to_dm_connector_state(connector->state); | |
2675 | ||
2676 | kfree(state); | |
2677 | ||
2678 | state = kzalloc(sizeof(*state), GFP_KERNEL); | |
2679 | ||
2680 | if (state) { | |
2681 | state->scaling = RMX_OFF; | |
2682 | state->underscan_enable = false; | |
2683 | state->underscan_hborder = 0; | |
2684 | state->underscan_vborder = 0; | |
2685 | ||
2686 | connector->state = &state->base; | |
2687 | connector->state->connector = connector; | |
2688 | } | |
2689 | } | |
2690 | ||
2691 | struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state( | |
2692 | struct drm_connector *connector) | |
2693 | { | |
2694 | struct dm_connector_state *state = | |
2695 | to_dm_connector_state(connector->state); | |
2696 | ||
2697 | struct dm_connector_state *new_state = | |
2698 | kmemdup(state, sizeof(*state), GFP_KERNEL); | |
2699 | ||
2700 | if (new_state) { | |
2701 | __drm_atomic_helper_connector_duplicate_state(connector, | |
2702 | &new_state->base); | |
2703 | return &new_state->base; | |
2704 | } | |
2705 | ||
2706 | return NULL; | |
2707 | } | |
2708 | ||
2709 | static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { | |
2710 | .reset = amdgpu_dm_connector_funcs_reset, | |
2711 | .detect = amdgpu_dm_connector_detect, | |
2712 | .fill_modes = drm_helper_probe_single_connector_modes, | |
2713 | .destroy = amdgpu_dm_connector_destroy, | |
2714 | .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, | |
2715 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | |
2716 | .atomic_set_property = amdgpu_dm_connector_atomic_set_property, | |
2717 | .atomic_get_property = amdgpu_dm_connector_atomic_get_property | |
2718 | }; | |
2719 | ||
2720 | static struct drm_encoder *best_encoder(struct drm_connector *connector) | |
2721 | { | |
2722 | int enc_id = connector->encoder_ids[0]; | |
2723 | struct drm_mode_object *obj; | |
2724 | struct drm_encoder *encoder; | |
2725 | ||
2726 | DRM_DEBUG_KMS("Finding the best encoder\n"); | |
2727 | ||
2728 | /* pick the encoder ids */ | |
2729 | if (enc_id) { | |
2730 | obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER); | |
2731 | if (!obj) { | |
2732 | DRM_ERROR("Couldn't find a matching encoder for our connector\n"); | |
2733 | return NULL; | |
2734 | } | |
2735 | encoder = obj_to_encoder(obj); | |
2736 | return encoder; | |
2737 | } | |
2738 | DRM_ERROR("No encoder id\n"); | |
2739 | return NULL; | |
2740 | } | |
2741 | ||
2742 | static int get_modes(struct drm_connector *connector) | |
2743 | { | |
2744 | return amdgpu_dm_connector_get_modes(connector); | |
2745 | } | |
2746 | ||
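/*
 * Build an emulated sink from the EDID property blob attached to the
 * connector, used when the connector is forced on without a physically
 * detected sink; if no EDID blob is present the connector is forced off.
 */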
2747 | static void create_eml_sink(struct amdgpu_connector *aconnector) | |
2748 | { | |
2749 | struct dc_sink_init_data init_params = { | |
2750 | .link = aconnector->dc_link, | |
2751 | .sink_signal = SIGNAL_TYPE_VIRTUAL | |
2752 | }; | |
2753 | struct edid *edid; | |
2754 | ||
2755 | if (!aconnector->base.edid_blob_ptr || | |
2756 | !aconnector->base.edid_blob_ptr->data) { | |
2757 | DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n", | |
2758 | aconnector->base.name); | |
2759 | ||
2760 | aconnector->base.force = DRM_FORCE_OFF; | |
2761 | aconnector->base.override_edid = false; | |
2762 | return; | |
2763 | } | |
2764 | ||
2765 | edid = (struct edid *) aconnector->base.edid_blob_ptr->data; | |
 | aconnector->edid = edid; | |
2766 | ||
2767 | aconnector->dc_em_sink = dc_link_add_remote_sink( | |
2768 | aconnector->dc_link, | |
2769 | (uint8_t *)edid, | |
2770 | (edid->extensions + 1) * EDID_LENGTH, | |
2771 | &init_params); | |
2772 | ||
2773 | if (aconnector->base.force | |
2774 | == DRM_FORCE_ON) | |
2775 | aconnector->dc_sink = aconnector->dc_link->local_sink ? | |
2776 | aconnector->dc_link->local_sink : | |
2777 | aconnector->dc_em_sink; | |
2778 | } | |
2779 | ||
2780 | static void handle_edid_mgmt(struct amdgpu_connector *aconnector) | |
2781 | { | |
2782 | struct dc_link *link = (struct dc_link *)aconnector->dc_link; | |
2783 | ||
2784 | /* In case of headless boot with force on for DP managed connector | |
2785 | * Those settings have to be != 0 to get initial modeset | |
2786 | */ | |
2787 | if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) { | |
2788 | link->verified_link_cap.lane_count = LANE_COUNT_FOUR; | |
2789 | link->verified_link_cap.link_rate = LINK_RATE_HIGH2; | |
2790 | } | |
2791 | ||
2792 | ||
2793 | aconnector->base.override_edid = true; | |
2794 | create_eml_sink(aconnector); | |
2795 | } | |
2796 | ||
2797 | int amdgpu_dm_connector_mode_valid( | |
2798 | struct drm_connector *connector, | |
2799 | struct drm_display_mode *mode) | |
2800 | { | |
2801 | int result = MODE_ERROR; | |
2802 | struct dc_sink *dc_sink; | |
2803 | struct amdgpu_device *adev = connector->dev->dev_private; | |
2804 | /* TODO: Unhardcode stream count */ | |
2805 | struct dc_stream *stream; | |
2806 | struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); | |
2807 | ||
2808 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || | |
2809 | (mode->flags & DRM_MODE_FLAG_DBLSCAN)) | |
2810 | return result; | |
2811 | ||
2812 | /* Only run this the first time mode_valid is called, to initialize | |
2813 | * EDID mgmt | |
2814 | */ | |
2815 | if (aconnector->base.force != DRM_FORCE_UNSPECIFIED && | |
2816 | !aconnector->dc_em_sink) | |
2817 | handle_edid_mgmt(aconnector); | |
2818 | ||
2819 | dc_sink = to_amdgpu_connector(connector)->dc_sink; | |
2820 | ||
2821 | if (NULL == dc_sink) { | |
2822 | DRM_ERROR("dc_sink is NULL!\n"); | |
2823 | goto fail; | |
2824 | } | |
2825 | ||
2826 | stream = dc_create_stream_for_sink(dc_sink); | |
2827 | if (NULL == stream) { | |
2828 | DRM_ERROR("Failed to create stream for sink!\n"); | |
2829 | goto fail; | |
2830 | } | |
2831 | ||
2832 | drm_mode_set_crtcinfo(mode, 0); | |
2833 | fill_stream_properties_from_drm_display_mode(stream, mode, connector); | |
2834 | ||
2835 | stream->src.width = mode->hdisplay; | |
2836 | stream->src.height = mode->vdisplay; | |
2837 | stream->dst = stream->src; | |
2838 | ||
2839 | if (dc_validate_stream(adev->dm.dc, stream)) | |
2840 | result = MODE_OK; | |
2841 | ||
2842 | dc_stream_release(stream); | |
2843 | ||
2844 | fail: | |
2845 | /* TODO: error handling*/ | |
2846 | return result; | |
2847 | } | |
2848 | ||
2849 | static const struct drm_connector_helper_funcs | |
2850 | amdgpu_dm_connector_helper_funcs = { | |
2851 | /* | |
2852 | * If a second, larger display is hotplugged while in fbcon mode, its | |
2853 | * higher-resolution modes are filtered out by drm_mode_validate_size() | |
2854 | * and are missing once the user starts lightdm. So renew the mode list | |
2855 | * in the get_modes callback instead of just returning the mode count. | |
2856 | */ | |
2857 | .get_modes = get_modes, | |
2858 | .mode_valid = amdgpu_dm_connector_mode_valid, | |
2859 | .best_encoder = best_encoder | |
2860 | }; | |
2861 | ||
2862 | static void dm_crtc_helper_disable(struct drm_crtc *crtc) | |
2863 | { | |
2864 | } | |
2865 | ||
2866 | static int dm_crtc_helper_atomic_check( | |
2867 | struct drm_crtc *crtc, | |
2868 | struct drm_crtc_state *state) | |
2869 | { | |
2870 | struct amdgpu_device *adev = crtc->dev->dev_private; | |
2871 | struct dc *dc = adev->dm.dc; | |
2872 | struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state); | |
2873 | int ret = -EINVAL; | |
2874 | ||
2875 | if (unlikely(!dm_crtc_state->stream && modeset_required(state))) { | |
2876 | WARN_ON(1); | |
2877 | return ret; | |
2878 | } | |
2879 | ||
2880 | /* In some use cases, like reset, no stream is attached */ | |
2881 | if (!dm_crtc_state->stream) | |
2882 | return 0; | |
2883 | ||
2884 | if (dc_validate_stream(dc, dm_crtc_state->stream)) | |
2885 | return 0; | |
2886 | ||
2887 | return ret; | |
2888 | } | |
2889 | ||
2890 | static bool dm_crtc_helper_mode_fixup( | |
2891 | struct drm_crtc *crtc, | |
2892 | const struct drm_display_mode *mode, | |
2893 | struct drm_display_mode *adjusted_mode) | |
2894 | { | |
2895 | return true; | |
2896 | } | |
2897 | ||
2898 | static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { | |
2899 | .disable = dm_crtc_helper_disable, | |
2900 | .atomic_check = dm_crtc_helper_atomic_check, | |
2901 | .mode_fixup = dm_crtc_helper_mode_fixup | |
2902 | }; | |
2903 | ||
2904 | static void dm_encoder_helper_disable(struct drm_encoder *encoder) | |
2905 | { | |
2906 | ||
2907 | } | |
2908 | ||
2909 | static int dm_encoder_helper_atomic_check( | |
2910 | struct drm_encoder *encoder, | |
2911 | struct drm_crtc_state *crtc_state, | |
2912 | struct drm_connector_state *conn_state) | |
2913 | { | |
2914 | return 0; | |
2915 | } | |
2916 | ||
2917 | const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { | |
2918 | .disable = dm_encoder_helper_disable, | |
2919 | .atomic_check = dm_encoder_helper_atomic_check | |
2920 | }; | |
2921 | ||
2922 | static void dm_drm_plane_reset(struct drm_plane *plane) | |
2923 | { | |
2924 | struct dm_plane_state *amdgpu_state = NULL; | |
2925 | ||
2926 | if (plane->state) | |
2927 | plane->funcs->atomic_destroy_state(plane, plane->state); | |
2928 | ||
2929 | amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); | |
2930 | ||
2931 | if (amdgpu_state) { | |
2932 | plane->state = &amdgpu_state->base; | |
2933 | plane->state->plane = plane; | |
2934 | plane->state->rotation = DRM_MODE_ROTATE_0; | |
2935 | } else | |
2936 | WARN_ON(1); | |
2937 | } | |
2938 | ||
2939 | static struct drm_plane_state * | |
2940 | dm_drm_plane_duplicate_state(struct drm_plane *plane) | |
2941 | { | |
2942 | struct dm_plane_state *dm_plane_state, *old_dm_plane_state; | |
2943 | ||
2944 | old_dm_plane_state = to_dm_plane_state(plane->state); | |
2945 | dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL); | |
2946 | if (!dm_plane_state) | |
2947 | return NULL; | |
2948 | ||
2949 | __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base); | |
2950 | ||
2951 | if (old_dm_plane_state->surface) { | |
2952 | dm_plane_state->surface = old_dm_plane_state->surface; | |
2953 | dc_surface_retain(dm_plane_state->surface); | |
2954 | } | |
2955 | ||
2956 | return &dm_plane_state->base; | |
2957 | } | |
2958 | ||
2959 | void dm_drm_plane_destroy_state(struct drm_plane *plane, | |
2960 | struct drm_plane_state *state) | |
2961 | { | |
2962 | struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); | |
2963 | ||
2964 | if (dm_plane_state->surface) | |
2965 | dc_surface_release(dm_plane_state->surface); | |
2966 | ||
2967 | __drm_atomic_helper_plane_destroy_state(state); | |
2968 | kfree(dm_plane_state); | |
2969 | } | |
2970 | ||
2971 | static const struct drm_plane_funcs dm_plane_funcs = { | |
2972 | .update_plane = drm_atomic_helper_update_plane, | |
2973 | .disable_plane = drm_atomic_helper_disable_plane, | |
2974 | .destroy = drm_plane_cleanup, | |
2975 | .reset = dm_drm_plane_reset, | |
2976 | .atomic_duplicate_state = dm_drm_plane_duplicate_state, | |
2977 | .atomic_destroy_state = dm_drm_plane_destroy_state, | |
2978 | }; | |
2979 | ||
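/*
 * prepare_fb: pin the framebuffer BO into VRAM and propagate the resulting
 * GPU address into the surface. Packed RGB formats use a single graphics
 * address, while NV12/NV21 place the chroma plane right after a 64-pixel
 * aligned luma plane. Cursor planes only record the BO on the CRTC, as per
 * the S3 workaround noted inside the function.
 */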
2980 | static int dm_plane_helper_prepare_fb( | |
2981 | struct drm_plane *plane, | |
2982 | struct drm_plane_state *new_state) | |
2983 | { | |
2984 | struct amdgpu_framebuffer *afb; | |
2985 | struct drm_gem_object *obj; | |
2986 | struct amdgpu_bo *rbo; | |
2987 | int r; | |
2988 | struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; | |
2989 | unsigned int awidth; | |
2990 | ||
2991 | dm_plane_state_old = to_dm_plane_state(plane->state); | |
2992 | dm_plane_state_new = to_dm_plane_state(new_state); | |
2993 | ||
2994 | if (!new_state->fb) { | |
2995 | DRM_DEBUG_KMS("No FB bound\n"); | |
2996 | return 0; | |
2997 | } | |
2998 | ||
2999 | afb = to_amdgpu_framebuffer(new_state->fb); | |
3000 | ||
3001 | obj = afb->obj; | |
3002 | rbo = gem_to_amdgpu_bo(obj); | |
3003 | r = amdgpu_bo_reserve(rbo, false); | |
3004 | if (unlikely(r != 0)) | |
3005 | return r; | |
3006 | ||
3007 | r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address); | |
3008 | ||
3009 | ||
3010 | amdgpu_bo_unreserve(rbo); | |
3011 | ||
3012 | if (unlikely(r != 0)) { | |
3013 | DRM_ERROR("Failed to pin framebuffer\n"); | |
3014 | return r; | |
3015 | } | |
3016 | ||
3017 | amdgpu_bo_ref(rbo); | |
3018 | ||
3019 | if (dm_plane_state_new->surface && | |
3020 | dm_plane_state_old->surface != dm_plane_state_new->surface) { | |
3021 | struct dc_surface *surface = dm_plane_state_new->surface; | |
3022 | ||
3023 | if (surface->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { | |
3024 | surface->address.grph.addr.low_part = lower_32_bits(afb->address); | |
3025 | surface->address.grph.addr.high_part = upper_32_bits(afb->address); | |
3026 | } else { | |
3027 | awidth = ALIGN(new_state->fb->width, 64); | |
3028 | surface->address.video_progressive.luma_addr.low_part | |
3029 | = lower_32_bits(afb->address); | |
3030 | surface->address.video_progressive.chroma_addr.low_part | |
3031 | = lower_32_bits(afb->address) + | |
3032 | (awidth * new_state->fb->height); | |
3033 | } | |
3034 | } | |
3035 | ||
3036 | /* This is a hack for S3: the 4.9 kernel filters cursor buffers out of | |
3037 | * prepare and cleanup in drm_atomic_helper_prepare_planes and | |
3038 | * drm_atomic_helper_cleanup_planes because the fb is not present in S3. | |
3039 | * In the 4.10 kernel this code should be removed, and amdgpu_device_suspend | |
3040 | * code touching frame buffers should be avoided for DC. | |
3041 | */ | |
3042 | if (plane->type == DRM_PLANE_TYPE_CURSOR) { | |
3043 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc); | |
3044 | ||
3045 | acrtc->cursor_bo = obj; | |
3046 | } | |
3047 | return 0; | |
3048 | } | |
3049 | ||
3050 | static void dm_plane_helper_cleanup_fb( | |
3051 | struct drm_plane *plane, | |
3052 | struct drm_plane_state *old_state) | |
3053 | { | |
3054 | struct amdgpu_bo *rbo; | |
3055 | struct amdgpu_framebuffer *afb; | |
3056 | int r; | |
3057 | ||
3058 | if (!old_state->fb) | |
3059 | return; | |
3060 | ||
3061 | afb = to_amdgpu_framebuffer(old_state->fb); | |
3062 | rbo = gem_to_amdgpu_bo(afb->obj); | |
3063 | r = amdgpu_bo_reserve(rbo, false); | |
3064 | if (unlikely(r)) { | |
3065 | DRM_ERROR("failed to reserve rbo before unpin\n"); | |
3066 | return; | |
3067 | } else { | |
3068 | amdgpu_bo_unpin(rbo); | |
3069 | amdgpu_bo_unreserve(rbo); | |
3070 | amdgpu_bo_unref(&rbo); | |
3071 | } | |
3072 | } | |
3073 | ||
3074 | int dm_create_validation_set_for_connector(struct drm_connector *connector, | |
3075 | struct drm_display_mode *mode, struct dc_validation_set *val_set) | |
3076 | { | |
3077 | int result = MODE_ERROR; | |
3078 | struct dc_sink *dc_sink = | |
3079 | to_amdgpu_connector(connector)->dc_sink; | |
3080 | /* TODO: Unhardcode stream count */ | |
3081 | struct dc_stream *stream; | |
3082 | ||
3083 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || | |
3084 | (mode->flags & DRM_MODE_FLAG_DBLSCAN)) | |
3085 | return result; | |
3086 | ||
3087 | if (NULL == dc_sink) { | |
3088 | DRM_ERROR("dc_sink is NULL!\n"); | |
3089 | return result; | |
3090 | } | |
3091 | ||
3092 | stream = dc_create_stream_for_sink(dc_sink); | |
3093 | ||
3094 | if (NULL == stream) { | |
3095 | DRM_ERROR("Failed to create stream for sink!\n"); | |
3096 | return result; | |
3097 | } | |
3098 | ||
3099 | drm_mode_set_crtcinfo(mode, 0); | |
3100 | ||
3101 | fill_stream_properties_from_drm_display_mode(stream, mode, connector); | |
3102 | ||
3103 | val_set->stream = stream; | |
3104 | ||
3105 | stream->src.width = mode->hdisplay; | |
3106 | stream->src.height = mode->vdisplay; | |
3107 | stream->dst = stream->src; | |
3108 | ||
3109 | return MODE_OK; | |
3110 | } | |
3111 | ||
3112 | static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { | |
3113 | .prepare_fb = dm_plane_helper_prepare_fb, | |
3114 | .cleanup_fb = dm_plane_helper_cleanup_fb, | |
3115 | }; | |
3116 | ||
3117 | /* | |
3118 | * TODO: these are currently initialized to rgb formats only. | |
3119 | * For future use cases we should either initialize them dynamically based on | |
3120 | * plane capabilities, or initialize this array to all formats, so internal drm | |
3121 | * check will succeed, and let DC implement the proper check | |
3122 | */ | |
3123 | static uint32_t rgb_formats[] = { | |
3124 | DRM_FORMAT_RGB888, | |
3125 | DRM_FORMAT_XRGB8888, | |
3126 | DRM_FORMAT_ARGB8888, | |
3127 | DRM_FORMAT_RGBA8888, | |
3128 | DRM_FORMAT_XRGB2101010, | |
3129 | DRM_FORMAT_XBGR2101010, | |
3130 | DRM_FORMAT_ARGB2101010, | |
3131 | DRM_FORMAT_ABGR2101010, | |
3132 | }; | |
3133 | ||
3134 | static uint32_t yuv_formats[] = { | |
3135 | DRM_FORMAT_NV12, | |
3136 | DRM_FORMAT_NV21, | |
3137 | }; | |
3138 | ||
3139 | static const u32 cursor_formats[] = { | |
3140 | DRM_FORMAT_ARGB8888 | |
3141 | }; | |
3142 | ||
3143 | int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, | |
3144 | struct amdgpu_plane *aplane, | |
3145 | unsigned long possible_crtcs) | |
3146 | { | |
3147 | int res = -EPERM; | |
3148 | ||
3149 | switch (aplane->base.type) { | |
3150 | case DRM_PLANE_TYPE_PRIMARY: | |
3151 | aplane->base.format_default = true; | |
3152 | ||
3153 | res = drm_universal_plane_init( | |
3154 | dm->adev->ddev, | |
3155 | &aplane->base, | |
3156 | possible_crtcs, | |
3157 | &dm_plane_funcs, | |
3158 | rgb_formats, | |
3159 | ARRAY_SIZE(rgb_formats), | |
3160 | NULL, aplane->base.type, NULL); | |
3161 | break; | |
3162 | case DRM_PLANE_TYPE_OVERLAY: | |
3163 | res = drm_universal_plane_init( | |
3164 | dm->adev->ddev, | |
3165 | &aplane->base, | |
3166 | possible_crtcs, | |
3167 | &dm_plane_funcs, | |
3168 | yuv_formats, | |
3169 | ARRAY_SIZE(yuv_formats), | |
3170 | NULL, aplane->base.type, NULL); | |
3171 | break; | |
3172 | case DRM_PLANE_TYPE_CURSOR: | |
3173 | res = drm_universal_plane_init( | |
3174 | dm->adev->ddev, | |
3175 | &aplane->base, | |
3176 | possible_crtcs, | |
3177 | &dm_plane_funcs, | |
3178 | cursor_formats, | |
3179 | ARRAY_SIZE(cursor_formats), | |
3180 | NULL, aplane->base.type, NULL); | |
3181 | break; | |
3182 | } | |
3183 | ||
3184 | drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs); | |
3185 | ||
3186 | return res; | |
3187 | } | |
3188 | ||
3189 | int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, | |
3190 | struct drm_plane *plane, | |
3191 | uint32_t crtc_index) | |
3192 | { | |
3193 | struct amdgpu_crtc *acrtc = NULL; | |
3194 | struct amdgpu_plane *cursor_plane; | |
3195 | ||
3196 | int res = -ENOMEM; | |
3197 | ||
3198 | cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); | |
3199 | if (!cursor_plane) | |
3200 | goto fail; | |
3201 | ||
3202 | cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR; | |
3203 | res = amdgpu_dm_plane_init(dm, cursor_plane, 0); | |
| if (res) | |
| goto fail; | |
3204 | ||
3205 | acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); | |
3206 | if (!acrtc) | |
3207 | goto fail; | |
3208 | ||
3209 | res = drm_crtc_init_with_planes( | |
3210 | dm->ddev, | |
3211 | &acrtc->base, | |
3212 | plane, | |
3213 | &cursor_plane->base, | |
3214 | &amdgpu_dm_crtc_funcs, NULL); | |
3215 | ||
3216 | if (res) | |
3217 | goto fail; | |
3218 | ||
3219 | drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs); | |
3220 | ||
3221 | acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size; | |
3222 | acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size; | |
3223 | ||
3224 | acrtc->crtc_id = crtc_index; | |
3225 | acrtc->base.enabled = false; | |
3226 | ||
3227 | dm->adev->mode_info.crtcs[crtc_index] = acrtc; | |
3228 | drm_mode_crtc_set_gamma_size(&acrtc->base, 256); | |
3229 | ||
3230 | return 0; | |
3231 | ||
3232 | fail: | |
3233 | kfree(acrtc); | |
3234 | kfree(cursor_plane); | |
3238 | return res; | |
3239 | } | |
3240 | ||
3241 | ||
3242 | static int to_drm_connector_type(enum signal_type st) | |
3243 | { | |
3244 | switch (st) { | |
3245 | case SIGNAL_TYPE_HDMI_TYPE_A: | |
3246 | return DRM_MODE_CONNECTOR_HDMIA; | |
3247 | case SIGNAL_TYPE_EDP: | |
3248 | return DRM_MODE_CONNECTOR_eDP; | |
3249 | case SIGNAL_TYPE_RGB: | |
3250 | return DRM_MODE_CONNECTOR_VGA; | |
3251 | case SIGNAL_TYPE_DISPLAY_PORT: | |
3252 | case SIGNAL_TYPE_DISPLAY_PORT_MST: | |
3253 | return DRM_MODE_CONNECTOR_DisplayPort; | |
3254 | case SIGNAL_TYPE_DVI_DUAL_LINK: | |
3255 | case SIGNAL_TYPE_DVI_SINGLE_LINK: | |
3256 | return DRM_MODE_CONNECTOR_DVID; | |
3257 | case SIGNAL_TYPE_VIRTUAL: | |
3258 | return DRM_MODE_CONNECTOR_VIRTUAL; | |
3259 | ||
3260 | default: | |
3261 | return DRM_MODE_CONNECTOR_Unknown; | |
3262 | } | |
3263 | } | |
3264 | ||
3265 | static void amdgpu_dm_get_native_mode(struct drm_connector *connector) | |
3266 | { | |
3267 | const struct drm_connector_helper_funcs *helper = | |
3268 | connector->helper_private; | |
3269 | struct drm_encoder *encoder; | |
3270 | struct amdgpu_encoder *amdgpu_encoder; | |
3271 | ||
3272 | encoder = helper->best_encoder(connector); | |
3273 | ||
3274 | if (encoder == NULL) | |
3275 | return; | |
3276 | ||
3277 | amdgpu_encoder = to_amdgpu_encoder(encoder); | |
3278 | ||
3279 | amdgpu_encoder->native_mode.clock = 0; | |
3280 | ||
3281 | if (!list_empty(&connector->probed_modes)) { | |
3282 | struct drm_display_mode *preferred_mode = NULL; | |
3283 | list_for_each_entry(preferred_mode, | |
3284 | &connector->probed_modes, | |
3285 | head) { | |
3286 | if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { | |
3287 | amdgpu_encoder->native_mode = *preferred_mode; | |
3288 | } | |
3289 | break; | |
3290 | } | |
3291 | ||
3292 | } | |
3293 | } | |
3294 | ||
3295 | static struct drm_display_mode *amdgpu_dm_create_common_mode( | |
3296 | struct drm_encoder *encoder, char *name, | |
3297 | int hdisplay, int vdisplay) | |
3298 | { | |
3299 | struct drm_device *dev = encoder->dev; | |
3300 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | |
3301 | struct drm_display_mode *mode = NULL; | |
3302 | struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; | |
3303 | ||
3304 | mode = drm_mode_duplicate(dev, native_mode); | |
3305 | ||
3306 | if (mode == NULL) | |
3307 | return NULL; | |
3308 | ||
3309 | mode->hdisplay = hdisplay; | |
3310 | mode->vdisplay = vdisplay; | |
3311 | mode->type &= ~DRM_MODE_TYPE_PREFERRED; | |
3312 | strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN); | |
3313 | ||
3314 | return mode; | |
3315 | ||
3316 | } | |
3317 | ||
3318 | static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, | |
3319 | struct drm_connector *connector) | |
3320 | { | |
3321 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | |
3322 | struct drm_display_mode *mode = NULL; | |
3323 | struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; | |
3324 | struct amdgpu_connector *amdgpu_connector = | |
3325 | to_amdgpu_connector(connector); | |
3326 | int i; | |
3327 | int n; | |
3328 | struct mode_size { | |
3329 | char name[DRM_DISPLAY_MODE_LEN]; | |
3330 | int w; | |
3331 | int h; | |
3332 | } common_modes[] = { | |
3333 | { "640x480", 640, 480}, | |
3334 | { "800x600", 800, 600}, | |
3335 | { "1024x768", 1024, 768}, | |
3336 | { "1280x720", 1280, 720}, | |
3337 | { "1280x800", 1280, 800}, | |
3338 | {"1280x1024", 1280, 1024}, | |
3339 | { "1440x900", 1440, 900}, | |
3340 | {"1680x1050", 1680, 1050}, | |
3341 | {"1600x1200", 1600, 1200}, | |
3342 | {"1920x1080", 1920, 1080}, | |
3343 | {"1920x1200", 1920, 1200} | |
3344 | }; | |
3345 | ||
3346 | n = ARRAY_SIZE(common_modes); | |
3347 | ||
3348 | for (i = 0; i < n; i++) { | |
3349 | struct drm_display_mode *curmode = NULL; | |
3350 | bool mode_existed = false; | |
3351 | ||
3352 | if (common_modes[i].w > native_mode->hdisplay || | |
3353 | common_modes[i].h > native_mode->vdisplay || | |
3354 | (common_modes[i].w == native_mode->hdisplay && | |
3355 | common_modes[i].h == native_mode->vdisplay)) | |
3356 | continue; | |
3357 | ||
3358 | list_for_each_entry(curmode, &connector->probed_modes, head) { | |
3359 | if (common_modes[i].w == curmode->hdisplay && | |
3360 | common_modes[i].h == curmode->vdisplay) { | |
3361 | mode_existed = true; | |
3362 | break; | |
3363 | } | |
3364 | } | |
3365 | ||
3366 | if (mode_existed) | |
3367 | continue; | |
3368 | ||
3369 | mode = amdgpu_dm_create_common_mode(encoder, | |
3370 | common_modes[i].name, common_modes[i].w, | |
3371 | common_modes[i].h); | |
3372 | drm_mode_probed_add(connector, mode); | |
3373 | amdgpu_connector->num_modes++; | |
3374 | } | |
3375 | } | |
3376 | ||
3377 | static void amdgpu_dm_connector_ddc_get_modes( | |
3378 | struct drm_connector *connector, | |
3379 | struct edid *edid) | |
3380 | { | |
3381 | struct amdgpu_connector *amdgpu_connector = | |
3382 | to_amdgpu_connector(connector); | |
3383 | ||
3384 | if (edid) { | |
3385 | /* empty probed_modes */ | |
3386 | INIT_LIST_HEAD(&connector->probed_modes); | |
3387 | amdgpu_connector->num_modes = | |
3388 | drm_add_edid_modes(connector, edid); | |
3389 | ||
3390 | drm_edid_to_eld(connector, edid); | |
3391 | ||
3392 | amdgpu_dm_get_native_mode(connector); | |
3393 | } else | |
3394 | amdgpu_connector->num_modes = 0; | |
3395 | } | |
3396 | ||
3397 | int amdgpu_dm_connector_get_modes(struct drm_connector *connector) | |
3398 | { | |
3399 | const struct drm_connector_helper_funcs *helper = | |
3400 | connector->helper_private; | |
3401 | struct amdgpu_connector *amdgpu_connector = | |
3402 | to_amdgpu_connector(connector); | |
3403 | struct drm_encoder *encoder; | |
3404 | struct edid *edid = amdgpu_connector->edid; | |
3405 | ||
3406 | encoder = helper->best_encoder(connector); | |
3407 | ||
3408 | amdgpu_dm_connector_ddc_get_modes(connector, edid); | |
3409 | amdgpu_dm_connector_add_common_modes(encoder, connector); | |
3410 | return amdgpu_connector->num_modes; | |
3411 | } | |
3412 | ||
3413 | void amdgpu_dm_connector_init_helper( | |
3414 | struct amdgpu_display_manager *dm, | |
3415 | struct amdgpu_connector *aconnector, | |
3416 | int connector_type, | |
3417 | struct dc_link *link, | |
3418 | int link_index) | |
3419 | { | |
3420 | struct amdgpu_device *adev = dm->ddev->dev_private; | |
3421 | ||
3422 | aconnector->connector_id = link_index; | |
3423 | aconnector->dc_link = link; | |
3424 | aconnector->base.interlace_allowed = false; | |
3425 | aconnector->base.doublescan_allowed = false; | |
3426 | aconnector->base.stereo_allowed = false; | |
3427 | aconnector->base.dpms = DRM_MODE_DPMS_OFF; | |
3428 | aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */ | |
3429 | ||
3430 | mutex_init(&aconnector->hpd_lock); | |
3431 | ||
3432 | /* Configure support for HPD hot plug: connector->polled defaults to 0, | |
3433 | * which means HPD hot plug is not supported. */ | |
3434 | switch (connector_type) { | |
3435 | case DRM_MODE_CONNECTOR_HDMIA: | |
3436 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; | |
3437 | break; | |
3438 | case DRM_MODE_CONNECTOR_DisplayPort: | |
3439 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; | |
3440 | break; | |
3441 | case DRM_MODE_CONNECTOR_DVID: | |
3442 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; | |
3443 | break; | |
3444 | default: | |
3445 | break; | |
3446 | } | |
3447 | ||
3448 | drm_object_attach_property(&aconnector->base.base, | |
3449 | dm->ddev->mode_config.scaling_mode_property, | |
3450 | DRM_MODE_SCALE_NONE); | |
3451 | ||
3452 | drm_object_attach_property(&aconnector->base.base, | |
3453 | adev->mode_info.underscan_property, | |
3454 | UNDERSCAN_OFF); | |
3455 | drm_object_attach_property(&aconnector->base.base, | |
3456 | adev->mode_info.underscan_hborder_property, | |
3457 | 0); | |
3458 | drm_object_attach_property(&aconnector->base.base, | |
3459 | adev->mode_info.underscan_vborder_property, | |
3460 | 0); | |
3461 | ||
3462 | } | |
3463 | ||
3464 | int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, | |
3465 | struct i2c_msg *msgs, int num) | |
3466 | { | |
3467 | struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); | |
3468 | struct ddc_service *ddc_service = i2c->ddc_service; | |
3469 | struct i2c_command cmd; | |
3470 | int i; | |
3471 | int result = -EIO; | |
3472 | ||
3473 | cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); | |
3474 | ||
3475 | if (!cmd.payloads) | |
3476 | return result; | |
3477 | ||
3478 | cmd.number_of_payloads = num; | |
3479 | cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; | |
3480 | cmd.speed = 100; | |
3481 | ||
3482 | for (i = 0; i < num; i++) { | |
3483 | cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); | |
3484 | cmd.payloads[i].address = msgs[i].addr; | |
3485 | cmd.payloads[i].length = msgs[i].len; | |
3486 | cmd.payloads[i].data = msgs[i].buf; | |
3487 | } | |
3488 | ||
3489 | if (dal_i2caux_submit_i2c_command( | |
3490 | ddc_service->ctx->i2caux, | |
3491 | ddc_service->ddc_pin, | |
3492 | &cmd)) | |
3493 | result = num; | |
3494 | ||
3495 | kfree(cmd.payloads); | |
3496 | return result; | |
3497 | } | |
3498 | ||
3499 | u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) | |
3500 | { | |
3501 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; | |
3502 | } | |
3503 | ||
3504 | static const struct i2c_algorithm amdgpu_dm_i2c_algo = { | |
3505 | .master_xfer = amdgpu_dm_i2c_xfer, | |
3506 | .functionality = amdgpu_dm_i2c_func, | |
3507 | }; | |
3508 | ||
3509 | static struct amdgpu_i2c_adapter *create_i2c( | |
3510 | struct ddc_service *ddc_service, | |
3511 | int link_index, | |
3512 | int *res) | |
3513 | { | |
3514 | struct amdgpu_device *adev = ddc_service->ctx->driver_context; | |
3515 | struct amdgpu_i2c_adapter *i2c; | |
3516 | ||
3517 | i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL); | |
| if (!i2c) | |
| return NULL; | |
3518 | i2c->base.owner = THIS_MODULE; | |
3519 | i2c->base.class = I2C_CLASS_DDC; | |
3520 | i2c->base.dev.parent = &adev->pdev->dev; | |
3521 | i2c->base.algo = &amdgpu_dm_i2c_algo; | |
3522 | snprintf(i2c->base.name, sizeof (i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); | |
3523 | i2c_set_adapdata(&i2c->base, i2c); | |
3524 | i2c->ddc_service = ddc_service; | |
3525 | ||
3526 | return i2c; | |
3527 | } | |
3528 | ||
3529 | /* Note: this function assumes that dc_link_detect() was called for the | |
3530 | * dc_link which will be represented by this aconnector. */ | |
3531 | int amdgpu_dm_connector_init( | |
3532 | struct amdgpu_display_manager *dm, | |
3533 | struct amdgpu_connector *aconnector, | |
3534 | uint32_t link_index, | |
3535 | struct amdgpu_encoder *aencoder) | |
3536 | { | |
3537 | int res = 0; | |
3538 | int connector_type; | |
3539 | struct dc *dc = dm->dc; | |
3540 | struct dc_link *link = dc_get_link_at_index(dc, link_index); | |
3541 | struct amdgpu_i2c_adapter *i2c; | |
3542 | ((struct dc_link *)link)->priv = aconnector; | |
3543 | ||
3544 | DRM_DEBUG_KMS("%s()\n", __func__); | |
3545 | ||
3546 | i2c = create_i2c(link->ddc, link->link_index, &res); | |
| if (!i2c) { | |
| DRM_ERROR("Failed to create i2c adapter data\n"); | |
| return -ENOMEM; | |
| } | |
3547 | aconnector->i2c = i2c; | |
3548 | res = i2c_add_adapter(&i2c->base); | |
3549 | ||
3550 | if (res) { | |
3551 | DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); | |
3552 | goto out_free; | |
3553 | } | |
3554 | ||
3555 | connector_type = to_drm_connector_type(link->connector_signal); | |
3556 | ||
3557 | res = drm_connector_init( | |
3558 | dm->ddev, | |
3559 | &aconnector->base, | |
3560 | &amdgpu_dm_connector_funcs, | |
3561 | connector_type); | |
3562 | ||
3563 | if (res) { | |
3564 | DRM_ERROR("connector_init failed\n"); | |
3565 | aconnector->connector_id = -1; | |
3566 | goto out_free; | |
3567 | } | |
3568 | ||
3569 | drm_connector_helper_add( | |
3570 | &aconnector->base, | |
3571 | &amdgpu_dm_connector_helper_funcs); | |
3572 | ||
3573 | amdgpu_dm_connector_init_helper( | |
3574 | dm, | |
3575 | aconnector, | |
3576 | connector_type, | |
3577 | link, | |
3578 | link_index); | |
3579 | ||
3580 | drm_mode_connector_attach_encoder( | |
3581 | &aconnector->base, &aencoder->base); | |
3582 | ||
3583 | drm_connector_register(&aconnector->base); | |
3584 | ||
3585 | if (connector_type == DRM_MODE_CONNECTOR_DisplayPort | |
3586 | || connector_type == DRM_MODE_CONNECTOR_eDP) | |
3587 | amdgpu_dm_initialize_dp_connector(dm, aconnector); | |
3588 | ||
3589 | #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ | |
3590 | defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) | |
3591 | ||
3592 | /* NOTE: this currently will create a backlight device even if a panel | |
3593 | * is not connected to the eDP/LVDS connector. | |
3594 | * | |
3595 | * This is less than ideal but we don't have sink information at this | |
3596 | * stage since detection happens after. We can't do detection earlier | |
3597 | * since MST detection needs connectors to be created first. | |
3598 | */ | |
3599 | if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) { | |
3600 | /* Even if registration fails, we should continue with | |
3601 | * DM initialization because not having a backlight control | |
3602 | * is better than a black screen. */ | |
3603 | amdgpu_dm_register_backlight_device(dm); | |
3604 | ||
3605 | if (dm->backlight_dev) | |
3606 | dm->backlight_link = link; | |
3607 | } | |
3608 | #endif | |
3609 | ||
3610 | out_free: | |
3611 | if (res) { | |
3612 | kfree(i2c); | |
3613 | aconnector->i2c = NULL; | |
3614 | } | |
3615 | return res; | |
3616 | } | |
3617 | ||
3618 | int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) | |
3619 | { | |
3620 | switch (adev->mode_info.num_crtc) { | |
3621 | case 1: | |
3622 | return 0x1; | |
3623 | case 2: | |
3624 | return 0x3; | |
3625 | case 3: | |
3626 | return 0x7; | |
3627 | case 4: | |
3628 | return 0xf; | |
3629 | case 5: | |
3630 | return 0x1f; | |
3631 | case 6: | |
3632 | default: | |
3633 | return 0x3f; | |
3634 | } | |
3635 | } | |
3636 | ||
3637 | int amdgpu_dm_encoder_init( | |
3638 | struct drm_device *dev, | |
3639 | struct amdgpu_encoder *aencoder, | |
3640 | uint32_t link_index) | |
3641 | { | |
3642 | struct amdgpu_device *adev = dev->dev_private; | |
3643 | ||
3644 | int res = drm_encoder_init(dev, | |
3645 | &aencoder->base, | |
3646 | &amdgpu_dm_encoder_funcs, | |
3647 | DRM_MODE_ENCODER_TMDS, | |
3648 | NULL); | |
3649 | ||
3650 | aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); | |
3651 | ||
3652 | if (!res) | |
3653 | aencoder->encoder_id = link_index; | |
3654 | else | |
3655 | aencoder->encoder_id = -1; | |
3656 | ||
3657 | drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); | |
3658 | ||
3659 | return res; | |
3660 | } | |
3661 | ||
3662 | static void manage_dm_interrupts( | |
3663 | struct amdgpu_device *adev, | |
3664 | struct amdgpu_crtc *acrtc, | |
3665 | bool enable) | |
3666 | { | |
3667 | /* | |
3668 | * This is not the correct translation, but it works as long as the VBLANK | |
3669 | * constant is the same as the PFLIP one. | |
3670 | */ | |
3671 | int irq_type = | |
3672 | amdgpu_crtc_idx_to_irq_type( | |
3673 | adev, | |
3674 | acrtc->crtc_id); | |
3675 | ||
3676 | if (enable) { | |
3677 | drm_crtc_vblank_on(&acrtc->base); | |
3678 | amdgpu_irq_get( | |
3679 | adev, | |
3680 | &adev->pageflip_irq, | |
3681 | irq_type); | |
3682 | } else { | |
3683 | ||
3684 | amdgpu_irq_put( | |
3685 | adev, | |
3686 | &adev->pageflip_irq, | |
3687 | irq_type); | |
3688 | drm_crtc_vblank_off(&acrtc->base); | |
3689 | } | |
3690 | } | |
3691 | ||
3692 | static bool is_scaling_state_different( | |
3693 | const struct dm_connector_state *dm_state, | |
3694 | const struct dm_connector_state *old_dm_state) | |
3695 | { | |
3696 | if (dm_state->scaling != old_dm_state->scaling) | |
3697 | return true; | |
3698 | if (!dm_state->underscan_enable && old_dm_state->underscan_enable) { | |
3699 | if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0) | |
3700 | return true; | |
3701 | } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) { | |
3702 | if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0) | |
3703 | return true; | |
3704 | } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder | |
3705 | || dm_state->underscan_vborder != old_dm_state->underscan_vborder) | |
3706 | return true; | |
3707 | return false; | |
3708 | } | |
3709 | ||
3710 | static void remove_stream( | |
3711 | struct amdgpu_device *adev, | |
3712 | struct amdgpu_crtc *acrtc, | |
3713 | struct dc_stream *stream) | |
3714 | { | |
3715 | /* this is the update mode case */ | |
3716 | if (adev->dm.freesync_module) | |
3717 | mod_freesync_remove_stream(adev->dm.freesync_module, stream); | |
3718 | ||
3719 | acrtc->otg_inst = -1; | |
3720 | acrtc->enabled = false; | |
3721 | } | |
3722 | ||
3723 | static void handle_cursor_update( | |
3724 | struct drm_plane *plane, | |
3725 | struct drm_plane_state *old_plane_state) | |
3726 | { | |
3727 | if (!plane->state->fb && !old_plane_state->fb) | |
3728 | return; | |
3729 | ||
3730 | /* Check if it's a cursor on/off update or just cursor move*/ | |
3731 | if (plane->state->fb == old_plane_state->fb) | |
3732 | dm_crtc_cursor_move( | |
3733 | plane->state->crtc, | |
3734 | plane->state->crtc_x, | |
3735 | plane->state->crtc_y); | |
3736 | else { | |
3737 | struct amdgpu_framebuffer *afb = | |
3738 | to_amdgpu_framebuffer(plane->state->fb); | |
3739 | dm_crtc_cursor_set( | |
3740 | (!!plane->state->fb) ? | |
3741 | plane->state->crtc : | |
3742 | old_plane_state->crtc, | |
3743 | (!!plane->state->fb) ? | |
3744 | afb->address : | |
3745 | 0, | |
3746 | plane->state->crtc_w, | |
3747 | plane->state->crtc_h); | |
3748 | } | |
3749 | } | |
3750 | ||
3751 | ||
3752 | static void prepare_flip_isr(struct amdgpu_crtc *acrtc) | |
3753 | { | |
3754 | ||
3755 | assert_spin_locked(&acrtc->base.dev->event_lock); | |
3756 | WARN_ON(acrtc->event); | |
3757 | ||
3758 | acrtc->event = acrtc->base.state->event; | |
3759 | ||
3760 | /* Set the flip status */ | |
3761 | acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; | |
3762 | ||
3763 | /* Mark this event as consumed */ | |
3764 | acrtc->base.state->event = NULL; | |
3765 | ||
3766 | DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", | |
3767 | acrtc->crtc_id); | |
3768 | } | |
3769 | ||
3770 | /* | |
3771 | * Executes flip | |
3772 | * | |
3773 | * Waits on all BO's fences and for proper vblank count | |
3774 | */ | |
3775 | static void amdgpu_dm_do_flip( | |
3776 | struct drm_crtc *crtc, | |
3777 | struct drm_framebuffer *fb, | |
3778 | uint32_t target) | |
3779 | { | |
3780 | unsigned long flags; | |
3781 | uint32_t target_vblank; | |
3782 | int r, vpos, hpos; | |
3783 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); | |
3784 | struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); | |
3785 | struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj); | |
3786 | struct amdgpu_device *adev = crtc->dev->dev_private; | |
3787 | bool async_flip = (acrtc->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; | |
3788 | struct dc_flip_addrs addr = { {0} }; | |
3789 | struct dc_surface_update surface_updates[1] = { {0} }; | |
3790 | struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); | |
3791 | ||
3792 | ||
3793 | /* Prepare wait for target vblank early - before the fence-waits */ | |
3794 | target_vblank = target - drm_crtc_vblank_count(crtc) + | |
3795 | amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id); | |
3796 | ||
3797 | /* TODO: This might fail and hence is better not used; wait | |
3798 | * explicitly on the fences instead. In general this should only | |
3799 | * be called for a blocking commit, as per the framework helpers. | |
3800 | */ | |
3802 | r = amdgpu_bo_reserve(abo, true); | |
3803 | if (unlikely(r != 0)) { | |
3804 | DRM_ERROR("failed to reserve buffer before flip\n"); | |
3805 | WARN_ON(1); | |
3806 | } | |
3807 | ||
3808 | /* Wait for all fences on this FB */ | |
3809 | WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false, | |
3810 | MAX_SCHEDULE_TIMEOUT) < 0); | |
3811 | ||
3812 | amdgpu_bo_unreserve(abo); | |
3813 | ||
3814 | /* Wait until we're out of the vertical blank period before the one | |
3815 | * targeted by the flip | |
3816 | */ | |
3817 | while ((acrtc->enabled && | |
3818 | (amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0, | |
3819 | &vpos, &hpos, NULL, NULL, | |
3820 | &crtc->hwmode) | |
3821 | & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == | |
3822 | (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && | |
3823 | (int)(target_vblank - | |
3824 | amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) { | |
3825 | usleep_range(1000, 1100); | |
3826 | } | |
3827 | ||
3828 | /* Flip */ | |
3829 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | |
3830 | /* update crtc fb */ | |
3831 | crtc->primary->fb = fb; | |
3832 | ||
3833 | WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE); | |
3834 | WARN_ON(!acrtc_state->stream); | |
3835 | ||
3836 | addr.address.grph.addr.low_part = lower_32_bits(afb->address); | |
3837 | addr.address.grph.addr.high_part = upper_32_bits(afb->address); | |
3838 | addr.flip_immediate = async_flip; | |
3839 | ||
3840 | ||
3841 | if (acrtc->base.state->event) | |
3842 | prepare_flip_isr(acrtc); | |
3843 | ||
3844 | surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->surfaces[0]; | |
3845 | surface_updates->flip_addr = &addr; | |
3846 | ||
3847 | ||
3848 | dc_update_surfaces_and_stream(adev->dm.dc, surface_updates, 1, acrtc_state->stream, NULL); | |
3849 | ||
3850 | DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n", | |
3851 | __func__, | |
3852 | addr.address.grph.addr.high_part, | |
3853 | addr.address.grph.addr.low_part); | |
3854 | ||
3855 | ||
3856 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | |
3857 | } | |
3858 | ||
3859 | static void amdgpu_dm_commit_surfaces(struct drm_atomic_state *state, | |
3860 | struct drm_device *dev, | |
3861 | struct amdgpu_display_manager *dm, | |
3862 | struct drm_crtc *pcrtc, | |
3863 | bool *wait_for_vblank) | |
3864 | { | |
3865 | uint32_t i; | |
3866 | struct drm_plane *plane; | |
3867 | struct drm_plane_state *old_plane_state; | |
3868 | struct dc_stream *dc_stream_attach; | |
3869 | struct dc_surface *dc_surfaces_constructed[MAX_SURFACES]; | |
3870 | struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); | |
3871 | struct dm_crtc_state *acrtc_state = to_dm_crtc_state(pcrtc->state); | |
3872 | int planes_count = 0; | |
3873 | unsigned long flags; | |
3874 | ||
3875 | /* update planes when needed */ | |
3876 | for_each_plane_in_state(state, plane, old_plane_state, i) { | |
3877 | struct drm_plane_state *plane_state = plane->state; | |
3878 | struct drm_crtc *crtc = plane_state->crtc; | |
3879 | struct drm_framebuffer *fb = plane_state->fb; | |
3880 | bool pflip_needed; | |
3881 | struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state); | |
3882 | ||
3883 | if (plane->type == DRM_PLANE_TYPE_CURSOR) { | |
3884 | handle_cursor_update(plane, old_plane_state); | |
3885 | continue; | |
3886 | } | |
3887 | ||
3888 | if (!fb || !crtc || pcrtc != crtc || !crtc->state->active || | |
3889 | (!crtc->state->planes_changed && | |
3890 | !pcrtc->state->color_mgmt_changed)) | |
3891 | continue; | |
3892 | ||
3893 | pflip_needed = !state->allow_modeset; | |
3894 | ||
3895 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | |
3896 | if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) { | |
3897 | DRM_ERROR("add_surface: acrtc %d, already busy\n", | |
3898 | acrtc_attach->crtc_id); | |
3899 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | |
3900 | /* In the commit-tail framework this cannot happen */ | |
3901 | WARN_ON(1); | |
3902 | } | |
3903 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | |
3904 | ||
3905 | if (!pflip_needed) { | |
3906 | WARN_ON(!dm_plane_state->surface); | |
3907 | ||
3908 | dc_surfaces_constructed[planes_count] = dm_plane_state->surface; | |
3909 | ||
3910 | dc_stream_attach = acrtc_state->stream; | |
3911 | planes_count++; | |
3912 | ||
3913 | } else if (crtc->state->planes_changed) { | |
3914 | /* Assume that even ONE crtc with an immediate flip means | |
3915 | * the entire commit can't wait for VBLANK. | |
3916 | * TODO: Check whether this is correct. | |
3917 | */ | |
3918 | *wait_for_vblank = | |
3919 | !(acrtc_attach->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC); | |
3921 | ||
3922 | /* TODO: Needs rework for multiplane flip */ | |
3923 | if (plane->type == DRM_PLANE_TYPE_PRIMARY) | |
3924 | drm_crtc_vblank_get(crtc); | |
3925 | ||
3926 | amdgpu_dm_do_flip( | |
3927 | crtc, | |
3928 | fb, | |
3929 | drm_crtc_vblank_count(crtc) + *wait_for_vblank); | |
3930 | ||
3931 | /* TODO BUG: remove ASAP in 4.12 to avoid a race between the worker and the flip IOCTL */ | |
3932 | ||
3933 | /* Clean up the flags for next usage */ | |
3934 | acrtc_attach->flip_flags = 0; | |
3935 | } | |
3936 | ||
3937 | } | |
3938 | ||
3939 | if (planes_count) { | |
3940 | unsigned long flags; | |
3941 | ||
3942 | if (pcrtc->state->event) { | |
3943 | ||
3944 | drm_crtc_vblank_get(pcrtc); | |
3945 | ||
3946 | spin_lock_irqsave(&pcrtc->dev->event_lock, flags); | |
3947 | prepare_flip_isr(acrtc_attach); | |
3948 | spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); | |
3949 | } | |
3950 | ||
3951 | if (!dc_commit_surfaces_to_stream(dm->dc, | |
3952 | dc_surfaces_constructed, | |
3953 | planes_count, | |
3954 | dc_stream_attach)) | |
3955 | dm_error("%s: Failed to attach surface!\n", __func__); | |
3956 | } else { | |
3957 | /* TODO BUG: plane disabling for this CRTC should go here. */ | |
3958 | } | |
3959 | } | |
3960 | ||
3961 | ||
3962 | int amdgpu_dm_atomic_commit( | |
3963 | struct drm_device *dev, | |
3964 | struct drm_atomic_state *state, | |
3965 | bool nonblock) | |
3966 | { | |
3967 | struct drm_crtc *crtc; | |
3968 | struct drm_crtc_state *new_state; | |
3969 | struct amdgpu_device *adev = dev->dev_private; | |
3970 | int i; | |
3971 | ||
3972 | /* | |
3973 | * We disable vblank and pflip interrupts on CRTCs that are about to | |
3974 | * change. We do it here to flush & disable the interrupts before | |
3975 | * drm_atomic_helper_swap_state() is called from drm_atomic_helper_commit(), | |
3976 | * since it updates the crtc->dm_crtc_state->stream pointer that is used in | |
3977 | * the ISRs. | |
3978 | */ | |
3979 | for_each_crtc_in_state(state, crtc, new_state, i) { | |
3980 | struct dm_crtc_state *old_acrtc_state = to_dm_crtc_state(crtc->state); | |
3981 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); | |
3982 | ||
3983 | if (drm_atomic_crtc_needs_modeset(new_state) && old_acrtc_state->stream) | |
3984 | manage_dm_interrupts(adev, acrtc, false); | |
3985 | } | |
3986 | ||
3987 | return drm_atomic_helper_commit(dev, state, nonblock); | |
3988 | ||
3989 | /*TODO Handle EINTR, reenable IRQ*/ | |
3990 | } | |
3991 | ||
3992 | void amdgpu_dm_atomic_commit_tail( | |
3993 | struct drm_atomic_state *state) | |
3994 | { | |
3995 | struct drm_device *dev = state->dev; | |
3996 | struct amdgpu_device *adev = dev->dev_private; | |
3997 | struct amdgpu_display_manager *dm = &adev->dm; | |
3998 | struct dm_atomic_state *dm_state; | |
3999 | uint32_t i, j; | |
4000 | uint32_t new_crtcs_count = 0; | |
4001 | struct drm_crtc *crtc, *pcrtc; | |
4002 | struct drm_crtc_state *old_crtc_state; | |
4003 | struct amdgpu_crtc *new_crtcs[MAX_STREAMS]; | |
4004 | struct dc_stream *new_stream = NULL; | |
4005 | unsigned long flags; | |
4006 | bool wait_for_vblank = true; | |
4007 | struct drm_connector *connector; | |
4008 | struct drm_connector_state *old_conn_state; | |
4009 | struct dm_crtc_state *old_acrtc_state, *new_acrtc_state; | |
4010 | ||
4011 | drm_atomic_helper_update_legacy_modeset_state(dev, state); | |
4012 | ||
4013 | dm_state = to_dm_atomic_state(state); | |
4014 | ||
4015 | /* update changed items */ | |
4016 | for_each_crtc_in_state(state, crtc, old_crtc_state, i) { | |
4017 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); | |
4018 | struct drm_crtc_state *new_state = crtc->state; | |
4019 | new_acrtc_state = to_dm_crtc_state(new_state); | |
4020 | old_acrtc_state = to_dm_crtc_state(old_crtc_state); | |
4021 | ||
4022 | DRM_DEBUG_KMS( | |
4023 | "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " | |
4024 | "planes_changed:%d, mode_changed:%d,active_changed:%d," | |
4025 | "connectors_changed:%d\n", | |
4026 | acrtc->crtc_id, | |
4027 | new_state->enable, | |
4028 | new_state->active, | |
4029 | new_state->planes_changed, | |
4030 | new_state->mode_changed, | |
4031 | new_state->active_changed, | |
4032 | new_state->connectors_changed); | |
4033 | ||
4034 | /* handles headless hotplug case, updating new_state and | |
4035 | * aconnector as needed | |
4036 | */ | |
4037 | ||
4038 | if (modeset_required(new_state)) { | |
4039 | ||
4040 | DRM_INFO("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc); | |
4041 | ||
4042 | if (!new_acrtc_state->stream) { | |
4043 | /* | |
4044 | * This could happen because of issues with | |
4045 | * userspace notification delivery. | |
4046 | * In this case userspace tries to set a mode on | |
4047 | * a display which is in fact disconnected. | |
4048 | * dc_sink is NULL in this case on the aconnector. | |
4049 | * We expect a mode reset to come soon. | |
4050 | * | |
4051 | * This can also happen when an unplug is done | |
4052 | * during the resume sequence. | |
4053 | * | |
4054 | * In this case, we want to pretend we still | |
4055 | * have a sink to keep the pipe running so that | |
4056 | * hw state is consistent with the sw state. | |
4057 | */ | |
4058 | DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n", | |
4059 | __func__, acrtc->base.base.id); | |
4060 | continue; | |
4061 | } | |
4062 | ||
4063 | ||
4064 | if (old_acrtc_state->stream) | |
4065 | remove_stream(adev, acrtc, old_acrtc_state->stream); | |
4066 | ||
4067 | ||
4068 | /* | |
4069 | * This loop saves the CRTCs that had a mode set; we need to | |
4070 | * enable vblanks on them once all resources are acquired in dc, | |
4071 | * after dc_commit_streams. | |
4072 | */ | |
4073 | ||
4074 | /*TODO move all this into dm_crtc_state, get rid of | |
4075 | * new_crtcs array and use old and new atomic states | |
4076 | * instead | |
4077 | */ | |
4078 | new_crtcs[new_crtcs_count] = acrtc; | |
4079 | new_crtcs_count++; | |
4080 | ||
4081 | acrtc->enabled = true; | |
4082 | acrtc->hw_mode = crtc->state->mode; | |
4083 | crtc->hwmode = crtc->state->mode; | |
4084 | } else if (modereset_required(new_state)) { | |
4085 | DRM_INFO("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); | |
4086 | ||
4087 | /* i.e. reset mode */ | |
4088 | if (old_acrtc_state->stream) | |
4089 | remove_stream(adev, acrtc, old_acrtc_state->stream); | |
4090 | } | |
4091 | } /* for_each_crtc_in_state() */ | |
4092 | ||
4093 | /* | |
4094 | * Add streams after required streams from new and replaced streams | |
4095 | * are removed from freesync module | |
4096 | */ | |
4097 | if (adev->dm.freesync_module) { | |
4098 | for (i = 0; i < new_crtcs_count; i++) { | |
4099 | struct amdgpu_connector *aconnector = NULL; | |
4100 | new_acrtc_state = to_dm_crtc_state(new_crtcs[i]->base.state); | |
4101 | ||
4102 | new_stream = new_acrtc_state->stream; | |
4103 | aconnector = | |
4104 | amdgpu_dm_find_first_crct_matching_connector( | |
4105 | state, | |
4106 | &new_crtcs[i]->base, | |
4107 | false); | |
4108 | if (!aconnector) { | |
4109 | DRM_INFO( | |
4110 | "Atomic commit: Failed to find connector for acrtc id:%d " | |
4111 | "skipping freesync init\n", | |
4112 | new_crtcs[i]->crtc_id); | |
4113 | continue; | |
4114 | } | |
4115 | ||
4116 | mod_freesync_add_stream(adev->dm.freesync_module, | |
4117 | new_stream, &aconnector->caps); | |
4118 | } | |
4119 | } | |
4120 | ||
4121 | if (dm_state->context) | |
4122 | WARN_ON(!dc_commit_context(dm->dc, dm_state->context)); | |
4123 | ||
4124 | ||
4125 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | |
4126 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); | |
4127 | new_acrtc_state = to_dm_crtc_state(crtc->state); | |
4128 | ||
4129 | if (new_acrtc_state->stream != NULL) { | |
4130 | const struct dc_stream_status *status = | |
4131 | dc_stream_get_status(new_acrtc_state->stream); | |
4132 | ||
4133 | if (!status) | |
4134 | DC_ERR("got no status for stream %p on acrtc%p\n", new_acrtc_state->stream, acrtc); | |
4135 | else | |
4136 | acrtc->otg_inst = status->primary_otg_inst; | |
4137 | } | |
4138 | } | |
4139 | ||
4140 | /* Handle scaling and underscan changes */ | |
4141 | for_each_connector_in_state(state, connector, old_conn_state, i) { | |
4142 | struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); | |
4143 | struct dm_connector_state *con_new_state = | |
4144 | to_dm_connector_state(aconnector->base.state); | |
4145 | struct dm_connector_state *con_old_state = | |
4146 | to_dm_connector_state(old_conn_state); | |
4147 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(con_new_state->base.crtc); | |
4148 | struct dc_stream_status *status = NULL; | |
4149 | ||
4150 | /* Skip any modesets/resets */ | |
4151 | if (!acrtc || drm_atomic_crtc_needs_modeset(acrtc->base.state)) | |
4152 | continue; | |
4153 | ||
4154 | /* Skip anything that is not a scaling or underscan change */ | |
4155 | if (!is_scaling_state_different(con_new_state, con_old_state)) | |
4156 | continue; | |
4157 | ||
4158 | new_acrtc_state = to_dm_crtc_state(acrtc->base.state); | |
4159 | ||
4160 | if (!new_acrtc_state->stream) | |
4161 | continue; | |
4162 | ||
4163 | update_stream_scaling_settings(&con_new_state->base.crtc->mode, | |
4164 | con_new_state, (struct dc_stream *)new_acrtc_state->stream); | |
4165 | ||
4166 | status = dc_stream_get_status(new_acrtc_state->stream); | |
4167 | WARN_ON(!status); | |
4168 | WARN_ON(!status->surface_count); | |
4169 | ||
4170 | /* TODO: How does this work with MPO? */ | |
4171 | if (!dc_commit_surfaces_to_stream( | |
4172 | dm->dc, | |
4173 | status->surfaces, | |
4174 | status->surface_count, | |
4175 | new_acrtc_state->stream)) | |
4176 | dm_error("%s: Failed to update stream scaling!\n", __func__); | |
4177 | } | |
4178 | ||
4179 | for (i = 0; i < new_crtcs_count; i++) { | |
4180 | /* | |
4181 | * loop to enable interrupts on newly arrived crtc | |
4182 | */ | |
4183 | struct amdgpu_crtc *acrtc = new_crtcs[i]; | |
4184 | new_acrtc_state = to_dm_crtc_state(acrtc->base.state); | |
4185 | ||
4186 | if (adev->dm.freesync_module) | |
4187 | mod_freesync_notify_mode_change( | |
4188 | adev->dm.freesync_module, &new_acrtc_state->stream, 1); | |
4189 | ||
4190 | manage_dm_interrupts(adev, acrtc, true); | |
4191 | } | |
4192 | ||
4193 | /* update planes when needed per crtc*/ | |
4194 | for_each_crtc_in_state(state, pcrtc, old_crtc_state, j) { | |
4195 | new_acrtc_state = to_dm_crtc_state(pcrtc->state); | |
4196 | ||
4197 | if (new_acrtc_state->stream) | |
4198 | amdgpu_dm_commit_surfaces(state, dev, dm, pcrtc, &wait_for_vblank); | |
4199 | } | |
4200 | ||
4201 | ||
4202 | /* | |
4203 | * send vblank event on all events not handled in flip and | |
4204 | * mark consumed event for drm_atomic_helper_commit_hw_done | |
4205 | */ | |
4206 | spin_lock_irqsave(&adev->ddev->event_lock, flags); | |
4207 | for_each_crtc_in_state(state, crtc, old_crtc_state, i) { | |
4208 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); | |
4209 | ||
4210 | if (acrtc->base.state->event) | |
4211 | drm_send_event_locked(dev, &crtc->state->event->base); | |
4212 | ||
4213 | acrtc->base.state->event = NULL; | |
4214 | } | |
4215 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); | |
4216 | ||
4217 | /* Signal HW programming completion */ | |
4218 | drm_atomic_helper_commit_hw_done(state); | |
4219 | ||
4220 | if (wait_for_vblank) | |
4221 | drm_atomic_helper_wait_for_vblanks(dev, state); | |
4222 | ||
4223 | drm_atomic_helper_cleanup_planes(dev, state); | |
4224 | } | |
4225 | ||
4226 | ||
4227 | static int dm_force_atomic_commit(struct drm_connector *connector) | |
4228 | { | |
4229 | int ret = 0; | |
4230 | struct drm_device *ddev = connector->dev; | |
4231 | struct drm_atomic_state *state = drm_atomic_state_alloc(ddev); | |
4232 | struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); | |
4233 | struct drm_plane *plane = disconnected_acrtc->base.primary; | |
4234 | struct drm_connector_state *conn_state; | |
4235 | struct drm_crtc_state *crtc_state; | |
4236 | struct drm_plane_state *plane_state; | |
4237 | ||
4238 | if (!state) | |
4239 | return -ENOMEM; | |
4240 | ||
4241 | state->acquire_ctx = ddev->mode_config.acquire_ctx; | |
4242 | ||
4243 | /* Construct an atomic state to restore previous display setting */ | |
4244 | ||
4245 | /* | |
4246 | * Attach connectors to drm_atomic_state | |
4247 | */ | |
4248 | conn_state = drm_atomic_get_connector_state(state, connector); | |
4249 | ||
4250 | ret = PTR_ERR_OR_ZERO(conn_state); | |
4251 | if (ret) | |
4252 | goto err; | |
4253 | ||
4254 | /* Attach crtc to drm_atomic_state*/ | |
4255 | crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); | |
4256 | ||
4257 | ret = PTR_ERR_OR_ZERO(crtc_state); | |
4258 | if (ret) | |
4259 | goto err; | |
4260 | ||
4261 | /* force a restore */ | |
4262 | crtc_state->mode_changed = true; | |
4263 | ||
4264 | /* Attach plane to drm_atomic_state */ | |
4265 | plane_state = drm_atomic_get_plane_state(state, plane); | |
4266 | ||
4267 | ret = PTR_ERR_OR_ZERO(plane_state); | |
4268 | if (ret) | |
4269 | goto err; | |
4270 | ||
4271 | ||
4272 | /* Call commit internally with the state we just constructed */ | |
4273 | ret = drm_atomic_commit(state); | |
4274 | if (!ret) | |
4275 | return 0; | |
4276 | ||
4277 | err: | |
4278 | DRM_ERROR("Restoring old state failed with %i\n", ret); | |
4279 | drm_atomic_state_put(state); | |
4280 | ||
4281 | return ret; | |
4282 | } | |
4283 | ||
4284 | /* | |
4285 | * This function handles all cases when a set mode does not come upon hotplug. | |
4286 | * This includes when the same display is unplugged and then plugged back into | |
4287 | * the same port, and when we are running without usermode desktop manager support. | |
4288 | */ | |
4289 | void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector) | |
4290 | { | |
4291 | struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); | |
4292 | struct amdgpu_crtc *disconnected_acrtc; | |
4293 | struct dm_crtc_state *acrtc_state; | |
4294 | ||
4295 | if (!aconnector->dc_sink || !connector->state || !connector->encoder) | |
4296 | return; | |
4297 | ||
4298 | disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); | |
4299 | acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state); | |
4300 | ||
4301 | if (!disconnected_acrtc || !acrtc_state->stream) | |
4302 | return; | |
4303 | ||
4304 | /* | |
4305 | * If the previous sink is not released and is different from the current | |
4306 | * one, we deduce that we are in a state where we cannot rely on a usermode | |
4307 | * call to turn on the display, so we do it here. | |
4308 | */ | |
4309 | if (acrtc_state->stream->sink != aconnector->dc_sink) | |
4310 | dm_force_atomic_commit(&aconnector->base); | |
4311 | } | |
4312 | ||
4313 | static uint32_t add_val_sets_surface( | |
4314 | struct dc_validation_set *val_sets, | |
4315 | uint32_t set_count, | |
4316 | const struct dc_stream *stream, | |
4317 | struct dc_surface *surface) | |
4318 | { | |
4319 | uint32_t i = 0, j = 0; | |
4320 | ||
4321 | while (i < set_count) { | |
4322 | if (val_sets[i].stream == stream) { | |
4323 | while (val_sets[i].surfaces[j]) | |
4324 | j++; | |
4325 | break; | |
4326 | } | |
4327 | ++i; | |
4328 | } | |
4329 | ||
4330 | val_sets[i].surfaces[j] = surface; | |
4331 | val_sets[i].surface_count++; | |
4332 | ||
4333 | return val_sets[i].surface_count; | |
4334 | } | |
4335 | ||
4336 | static uint32_t update_in_val_sets_stream( | |
4337 | struct dc_validation_set *val_sets, | |
4338 | uint32_t set_count, | |
4339 | struct dc_stream *old_stream, | |
4340 | struct dc_stream *new_stream, | |
4341 | struct drm_crtc *crtc) | |
4342 | { | |
4343 | uint32_t i = 0; | |
4344 | ||
4345 | while (i < set_count) { | |
4346 | if (val_sets[i].stream == old_stream) | |
4347 | break; | |
4348 | ++i; | |
4349 | } | |
4350 | ||
4351 | val_sets[i].stream = new_stream; | |
4352 | ||
4353 | if (i == set_count) | |
4354 | /* nothing found. add new one to the end */ | |
4355 | return set_count + 1; | |
4356 | ||
4357 | return set_count; | |
4358 | } | |
4359 | ||
4360 | static uint32_t remove_from_val_sets( | |
4361 | struct dc_validation_set *val_sets, | |
4362 | uint32_t set_count, | |
4363 | const struct dc_stream *stream) | |
4364 | { | |
4365 | int i; | |
4366 | ||
4367 | for (i = 0; i < set_count; i++) | |
4368 | if (val_sets[i].stream == stream) | |
4369 | break; | |
4370 | ||
4371 | if (i == set_count) { | |
4372 | /* nothing found */ | |
4373 | return set_count; | |
4374 | } | |
4375 | ||
4376 | set_count--; | |
4377 | ||
4378 | for (; i < set_count; i++) { | |
4379 | val_sets[i] = val_sets[i + 1]; | |
4380 | } | |
4381 | ||
4382 | return set_count; | |
4383 | } | |
4384 | ||
4385 | /* | |
4386 | * Grabs all modesetting locks to serialize against any blocking commits, and | |
4387 | * waits for completion of all non-blocking commits. | |
4388 | */ | |
4389 | static int do_acquire_global_lock( | |
4390 | struct drm_device *dev, | |
4391 | struct drm_atomic_state *state) | |
4392 | { | |
4393 | struct drm_crtc *crtc; | |
4394 | struct drm_crtc_commit *commit; | |
4395 | long ret; | |
4396 | ||
4397 | /* Adding all modeset locks to acquire_ctx will | |
4398 | * ensure that when the framework releases it, the | |
4399 | * extra locks we are taking here will also get released. | |
4400 | */ | |
4401 | ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx); | |
4402 | if (ret) | |
4403 | return ret; | |
4404 | ||
4405 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | |
4406 | spin_lock(&crtc->commit_lock); | |
4407 | commit = list_first_entry_or_null(&crtc->commit_list, | |
4408 | struct drm_crtc_commit, commit_entry); | |
4409 | if (commit) | |
4410 | drm_crtc_commit_get(commit); | |
4411 | spin_unlock(&crtc->commit_lock); | |
4412 | ||
4413 | if (!commit) | |
4414 | continue; | |
4415 | ||
4416 | /* Make sure all pending HW programming completed and | |
4417 | * page flips done | |
4418 | */ | |
4419 | ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ); | |
4420 | ||
4421 | if (ret > 0) | |
4422 | ret = wait_for_completion_interruptible_timeout( | |
4423 | &commit->flip_done, 10*HZ); | |
4424 | ||
4425 | if (ret == 0) | |
4426 | DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done " | |
4427 | "timed out\n", crtc->base.id, crtc->name); | |
4428 | ||
4429 | drm_crtc_commit_put(commit); | |
4430 | } | |
4431 | ||
4432 | return ret < 0 ? ret : 0; | |
4433 | } | |
4434 | ||
4435 | int amdgpu_dm_atomic_check(struct drm_device *dev, | |
4436 | struct drm_atomic_state *state) | |
4437 | { | |
4438 | struct dm_atomic_state *dm_state; | |
4439 | struct drm_crtc *crtc; | |
4440 | struct drm_crtc_state *crtc_state; | |
4441 | struct drm_plane *plane; | |
4442 | struct drm_plane_state *plane_state; | |
4443 | int i, j; | |
4444 | int ret; | |
4445 | struct amdgpu_device *adev = dev->dev_private; | |
4446 | struct dc *dc = adev->dm.dc; | |
4447 | struct drm_connector *connector; | |
4448 | struct drm_connector_state *conn_state; | |
4449 | int set_count; | |
4450 | struct dc_validation_set set[MAX_STREAMS] = { { 0 } }; | |
4451 | struct dm_crtc_state *old_acrtc_state, *new_acrtc_state; | |
4452 | ||
4453 | /* | |
4454 | * This bool will be set to true for any modeset/reset | |
4455 | * or surface update which implies a non-fast surface update. | |
4456 | */ | |
4457 | bool lock_and_validation_needed = false; | |
4458 | ||
4459 | ret = drm_atomic_helper_check_modeset(dev, state); | |
4460 | ||
4461 | if (ret) { | |
4462 | DRM_ERROR("Atomic state validation failed with error :%d !\n", ret); | |
4463 | return ret; | |
4464 | } | |
4465 | ||
4466 | dm_state = to_dm_atomic_state(state); | |
4467 | ||
4468 | /* copy existing configuration */ | |
4469 | set_count = 0; | |
4470 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | |
4471 | ||
4472 | old_acrtc_state = to_dm_crtc_state(crtc->state); | |
4473 | ||
4474 | if (old_acrtc_state->stream) { | |
4475 | dc_stream_retain(old_acrtc_state->stream); | |
4476 | set[set_count].stream = old_acrtc_state->stream; | |
4477 | ++set_count; | |
4478 | } | |
4479 | } | |
4480 | ||
4481 | /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */ | |
4482 | /* update changed items */ | |
4483 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | |
4484 | struct amdgpu_crtc *acrtc = NULL; | |
4485 | struct amdgpu_connector *aconnector = NULL; | |
4486 | old_acrtc_state = to_dm_crtc_state(crtc->state); | |
4487 | new_acrtc_state = to_dm_crtc_state(crtc_state); | |
4488 | acrtc = to_amdgpu_crtc(crtc); | |
4489 | ||
4490 | aconnector = amdgpu_dm_find_first_crct_matching_connector(state, crtc, true); | |
4491 | ||
4492 | DRM_DEBUG_KMS( | |
4493 | "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " | |
4494 | "planes_changed:%d, mode_changed:%d,active_changed:%d," | |
4495 | "connectors_changed:%d\n", | |
4496 | acrtc->crtc_id, | |
4497 | crtc_state->enable, | |
4498 | crtc_state->active, | |
4499 | crtc_state->planes_changed, | |
4500 | crtc_state->mode_changed, | |
4501 | crtc_state->active_changed, | |
4502 | crtc_state->connectors_changed); | |
4503 | ||
4504 | if (modeset_required(crtc_state)) { | |
4505 | ||
4506 | struct dc_stream *new_stream = NULL; | |
4507 | struct drm_connector_state *conn_state = NULL; | |
4508 | struct dm_connector_state *dm_conn_state = NULL; | |
4509 | ||
4510 | if (aconnector) { | |
4511 | conn_state = drm_atomic_get_connector_state(state, &aconnector->base); | |
4512 | if (IS_ERR(conn_state)) { | |
4513 | ret = PTR_ERR_OR_ZERO(conn_state); | |
4514 | goto fail; | |
4515 | } | |
4516 | ||
4517 | dm_conn_state = to_dm_connector_state(conn_state); | |
4518 | } | |
4519 | ||
4520 | new_stream = create_stream_for_sink(aconnector, &crtc_state->mode, dm_conn_state); | |
4521 | ||
4522 | /* | |
4523 | * We can have no stream on ACTION_SET if a display | |
4524 | * was disconnected during S3; in this case it is not an | |
4525 | * error, the OS will be updated after detection and will | |
4526 | * do the right thing on the next atomic commit. | |
4527 | */ | |
4528 | if (!new_stream) { | |
4529 | DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n", | |
4530 | __func__, acrtc->base.base.id); | |
4531 | break; | |
4532 | } | |
4533 | ||
4534 | if (new_acrtc_state->stream) | |
4535 | dc_stream_release(new_acrtc_state->stream); | |
4536 | ||
4537 | new_acrtc_state->stream = new_stream; | |
4538 | ||
4539 | set_count = update_in_val_sets_stream( | |
4540 | set, | |
4541 | set_count, | |
4542 | old_acrtc_state->stream, | |
4543 | new_acrtc_state->stream, | |
4544 | crtc); | |
4545 | ||
4546 | lock_and_validation_needed = true; | |
4547 | ||
4548 | } else if (modereset_required(crtc_state)) { | |
4549 | ||
4550 | /* i.e. reset mode */ | |
4551 | if (new_acrtc_state->stream) { | |
4552 | set_count = remove_from_val_sets( | |
4553 | set, | |
4554 | set_count, | |
4555 | new_acrtc_state->stream); | |
4556 | ||
4557 | dc_stream_release(new_acrtc_state->stream); | |
4558 | new_acrtc_state->stream = NULL; | |
4559 | ||
4560 | lock_and_validation_needed = true; | |
4561 | } | |
4562 | } | |
4563 | ||
4564 | ||
4565 | /* | |
4566 | * Hack: Commit needs planes right now, specifically for gamma | |
4567 | * TODO rework commit to check CRTC for gamma change | |
4568 | */ | |
4569 | if (crtc_state->color_mgmt_changed) { | |
4570 | ||
4571 | ret = drm_atomic_add_affected_planes(state, crtc); | |
4572 | if (ret) | |
4573 | goto fail; | |
4574 | } | |
4575 | } | |
4576 | ||
4577 | /* Check scaling and underscan changes */ | |
4578 | /* TODO: Removed scaling changes validation due to inability to commit | |
4579 | * a new stream into the context w/o causing a full reset. Need to | |
4580 | * decide how to handle. | |
4581 | */ | |
4582 | for_each_connector_in_state(state, connector, conn_state, i) { | |
4583 | struct amdgpu_connector *aconnector = to_amdgpu_connector(connector); | |
4584 | struct dm_connector_state *con_old_state = | |
4585 | to_dm_connector_state(aconnector->base.state); | |
4586 | struct dm_connector_state *con_new_state = | |
4587 | to_dm_connector_state(conn_state); | |
4588 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(con_new_state->base.crtc); | |
4589 | ||
4590 | /* Skip any modesets/resets */ | |
4591 | if (!acrtc || drm_atomic_crtc_needs_modeset(acrtc->base.state)) | |
4592 | continue; | |
4593 | ||
4594 | /* Skip anything that is not a scaling or underscan change */ | |
4595 | if (!is_scaling_state_different(con_new_state, con_old_state)) | |
4596 | continue; | |
4597 | ||
4598 | lock_and_validation_needed = true; | |
4599 | } | |
4600 | ||
4601 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | |
4602 | new_acrtc_state = to_dm_crtc_state(crtc_state); | |
4603 | ||
4604 | for_each_plane_in_state(state, plane, plane_state, j) { | |
4605 | struct drm_crtc *plane_crtc = plane_state->crtc; | |
4606 | struct drm_framebuffer *fb = plane_state->fb; | |
4607 | bool pflip_needed; | |
4608 | struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state); | |
4609 | ||
4610 | /*TODO Implement atomic check for cursor plane */ | |
4611 | if (plane->type == DRM_PLANE_TYPE_CURSOR) | |
4612 | continue; | |
4613 | ||
4614 | if (!fb || !plane_crtc || crtc != plane_crtc || !crtc_state->active) | |
4615 | continue; | |
4616 | ||
4617 | WARN_ON(!new_acrtc_state->stream); | |
4618 | ||
4619 | pflip_needed = !state->allow_modeset; | |
4620 | if (!pflip_needed) { | |
4621 | struct dc_surface *surface; | |
4622 | ||
4623 | surface = dc_create_surface(dc); | |
4624 | ||
4625 | ret = fill_plane_attributes( | |
4626 | plane_crtc->dev->dev_private, | |
4627 | surface, | |
4628 | plane_state, | |
4629 | crtc_state, | |
4630 | false); | |
4631 | if (ret) | |
4632 | goto fail; | |
4633 | ||
4634 | ||
4635 | if (dm_plane_state->surface) | |
4636 | dc_surface_release(dm_plane_state->surface); | |
4637 | ||
4638 | dm_plane_state->surface = surface; | |
4639 | ||
4640 | add_val_sets_surface(set, | |
4641 | set_count, | |
4642 | new_acrtc_state->stream, | |
4643 | surface); | |
4644 | ||
4645 | lock_and_validation_needed = true; | |
4646 | } | |
4647 | } | |
4648 | } | |
4649 | ||
4650 | /* Run this here since we want to validate the streams we created */ | |
4651 | ret = drm_atomic_helper_check_planes(dev, state); | |
4652 | if (ret) | |
4653 | goto fail; | |
4654 | ||
4655 | /* | |
4656 | * For the full-update case, when removing/adding/updating | |
4657 | * streams on one CRTC while flipping on another CRTC, acquiring | |
4658 | * the global lock will guarantee that any such full update | |
4659 | * commit will wait for the completion of any outstanding flip | |
4660 | * using DRM's synchronization events. | |
4663 | */ | |
4664 | ||
4665 | if (lock_and_validation_needed) { | |
4666 | ||
4667 | ret = do_acquire_global_lock(dev, state); | |
4668 | if (ret) | |
4669 | goto fail; | |
4670 | WARN_ON(dm_state->context); | |
4671 | dm_state->context = dc_get_validate_context(dc, set, set_count); | |
4672 | if (!dm_state->context) { | |
4673 | ret = -EINVAL; | |
4674 | goto fail; | |
4675 | } | |
4676 | } | |
4677 | ||
4678 | /* Must be success */ | |
4679 | WARN_ON(ret); | |
4680 | return ret; | |
4681 | ||
4682 | fail: | |
4683 | if (ret == -EDEADLK) | |
4684 | DRM_DEBUG_KMS("Atomic check stopped due to to deadlock.\n"); | |
4685 | else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS) | |
4686 | DRM_DEBUG_KMS("Atomic check stopped due to to signal.\n"); | |
4687 | else | |
4688 | DRM_ERROR("Atomic check failed with err: %d .\n", ret); | |
4689 | ||
4690 | return ret; | |
4691 | } | |
4692 | ||
4693 | static bool is_dp_capable_without_timing_msa( | |
4694 | struct dc *dc, | |
4695 | struct amdgpu_connector *amdgpu_connector) | |
4696 | { | |
4697 | uint8_t dpcd_data; | |
4698 | bool capable = false; | |
4699 | ||
4700 | if (amdgpu_connector->dc_link && | |
4701 | dm_helpers_dp_read_dpcd( | |
4702 | NULL, | |
4703 | amdgpu_connector->dc_link, | |
4704 | DP_DOWN_STREAM_PORT_COUNT, | |
4705 | &dpcd_data, | |
4706 | sizeof(dpcd_data))) { | |
4707 | capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false; | |
4708 | } | |
4709 | ||
4710 | return capable; | |
4711 | } | |
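| ||
| /* Parse the EDID monitor range descriptor for DP/eDP sinks that can ignore | |
| * MSA timing and record the supported refresh-rate range as freesync caps | |
| * when the range is wider than 10 Hz. | |
| */ | |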
4712 | void amdgpu_dm_add_sink_to_freesync_module( | |
4713 | struct drm_connector *connector, | |
4714 | struct edid *edid) | |
4715 | { | |
4716 | int i; | |
4717 | uint64_t val_capable; | |
4718 | bool edid_check_required; | |
4719 | struct detailed_timing *timing; | |
4720 | struct detailed_non_pixel *data; | |
4721 | struct detailed_data_monitor_range *range; | |
4722 | struct amdgpu_connector *amdgpu_connector = | |
4723 | to_amdgpu_connector(connector); | |
4724 | ||
4725 | struct drm_device *dev = connector->dev; | |
4726 | struct amdgpu_device *adev = dev->dev_private; | |
4727 | edid_check_required = false; | |
4728 | if (!amdgpu_connector->dc_sink) { | |
4729 | DRM_ERROR("dc_sink NULL, could not add free_sync module.\n"); | |
4730 | return; | |
4731 | } | |
4732 | if (!adev->dm.freesync_module) | |
4733 | return; | |
4734 | /* | |
4735 | * If the EDID is non-NULL, restrict freesync only to DP and eDP. | |
4736 | */ | |
4737 | if (edid) { | |
4738 | if (amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT | |
4739 | || amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) { | |
4740 | edid_check_required = is_dp_capable_without_timing_msa( | |
4741 | adev->dm.dc, | |
4742 | amdgpu_connector); | |
4743 | } | |
4744 | } | |
4745 | val_capable = 0; | |
4746 | if (edid_check_required && (edid->version > 1 || | |
4747 | (edid->version == 1 && edid->revision > 1))) { | |
4748 | for (i = 0; i < 4; i++) { | |
4749 | ||
4750 | timing = &edid->detailed_timings[i]; | |
4751 | data = &timing->data.other_data; | |
4752 | range = &data->data.range; | |
4753 | /* | |
4754 | * Check if monitor has continuous frequency mode | |
4755 | */ | |
4756 | if (data->type != EDID_DETAIL_MONITOR_RANGE) | |
4757 | continue; | |
4758 | /* | |
4759 | * Check for flag range limits only. If flag == 1, then | |
4760 | * no additional timing information is provided. | |
4761 | * Default GTF, GTF Secondary curve and CVT are not | |
4762 | * supported | |
4763 | */ | |
4764 | if (range->flags != 1) | |
4765 | continue; | |
4766 | ||
4767 | amdgpu_connector->min_vfreq = range->min_vfreq; | |
4768 | amdgpu_connector->max_vfreq = range->max_vfreq; | |
4769 | amdgpu_connector->pixel_clock_mhz = | |
4770 | range->pixel_clock_mhz * 10; | |
4771 | break; | |
4772 | } | |
4773 | ||
4774 | if (amdgpu_connector->max_vfreq - | |
4775 | amdgpu_connector->min_vfreq > 10) { | |
4776 | amdgpu_connector->caps.supported = true; | |
4777 | amdgpu_connector->caps.min_refresh_in_micro_hz = | |
4778 | amdgpu_connector->min_vfreq * 1000000; | |
4779 | amdgpu_connector->caps.max_refresh_in_micro_hz = | |
4780 | amdgpu_connector->max_vfreq * 1000000; | |
4781 | val_capable = 1; | |
4782 | } | |
4783 | } | |
4784 | ||
4785 | /* | |
4786 | * TODO figure out how to notify user-mode or DRM of freesync caps | |
4787 | * once we figure out how to deal with freesync in an upstreamable | |
4788 | * fashion | |
4789 | */ | |
4790 | ||
4791 | } | |
4792 | ||
4793 | void amdgpu_dm_remove_sink_from_freesync_module( | |
4794 | struct drm_connector *connector) | |
4795 | { | |
4796 | /* | |
4797 | * TODO fill in once we figure out how to deal with freesync in | |
4798 | * an upstreamable fashion | |
4799 | */ | |
4800 | } |