]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/gpu/drm/amd/amdgpu/dce_virtual.c
90bb08309a533cd2bad92de91ac9b3cc3bc3db7f
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / amd / amdgpu / dce_virtual.c
1 /*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23 #include <drm/drmP.h>
24 #include "amdgpu.h"
25 #include "amdgpu_pm.h"
26 #include "amdgpu_i2c.h"
27 #include "atom.h"
28 #include "amdgpu_pll.h"
29 #include "amdgpu_connectors.h"
30 #ifdef CONFIG_DRM_AMDGPU_SI
31 #include "dce_v6_0.h"
32 #endif
33 #ifdef CONFIG_DRM_AMDGPU_CIK
34 #include "dce_v8_0.h"
35 #endif
36 #include "dce_v10_0.h"
37 #include "dce_v11_0.h"
38 #include "dce_virtual.h"
39
40 #define DCE_VIRTUAL_VBLANK_PERIOD 16666666
41
42
43 static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
44 static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
45 static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
46 int index);
47
48 /**
49 * dce_virtual_vblank_wait - vblank wait asic callback.
50 *
51 * @adev: amdgpu_device pointer
52 * @crtc: crtc to wait for vblank on
53 *
54 * Wait for vblank on the requested crtc (evergreen+).
55 */
56 static void dce_virtual_vblank_wait(struct amdgpu_device *adev, int crtc)
57 {
58 return;
59 }
60
/* No hardware frame counter on a virtual CRTC; always report 0. */
static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	return 0;
}
65
/* No-op: a virtual display has no scanout base register to reprogram. */
static void dce_virtual_page_flip(struct amdgpu_device *adev,
			      int crtc_id, u64 crtc_base, bool async)
{
	return;
}
71
/*
 * Scanout position is meaningless without real hardware; zero the
 * outputs and return -EINVAL so callers fall back to timestamps.
 */
static int dce_virtual_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	*vbl = 0;
	*position = 0;

	return -EINVAL;
}
80
/* Virtual connector is always considered connected. */
static bool dce_virtual_hpd_sense(struct amdgpu_device *adev,
				  enum amdgpu_hpd_id hpd)
{
	return true;
}
86
/* No-op: no HPD hardware to configure on a virtual display. */
static void dce_virtual_hpd_set_polarity(struct amdgpu_device *adev,
					 enum amdgpu_hpd_id hpd)
{
	return;
}
92
/* No HPD GPIO register exists for virtual display; report 0. */
static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return 0;
}
97
/*
 * Disable the physical DCE block (if the ASIC has one) before MC
 * reprogramming, dispatching to the per-generation disable routine.
 */
static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
			      struct amdgpu_mode_mc_save *save)
{
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		dce_v6_0_disable_dce(adev);
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		dce_v8_0_disable_dce(adev);
		break;
#endif
	case CHIP_FIJI:
	case CHIP_TONGA:
		dce_v10_0_disable_dce(adev);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		dce_v11_0_disable_dce(adev);
		break;
	case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
		/* no DCE */
		return;
	default:
		DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
	}

	return;
}
/* No-op: nothing was saved in stop_mc_access, so nothing to restore. */
static void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
			      struct amdgpu_mode_mc_save *save)
{
	return;
}
147
/* No-op: no VGA render hardware on a virtual display. */
static void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
				      bool render)
{
	return;
}
153
154 /**
155 * dce_virtual_bandwidth_update - program display watermarks
156 *
157 * @adev: amdgpu_device pointer
158 *
159 * Calculate and program the display watermarks and line
160 * buffer allocation (CIK).
161 */
162 static void dce_virtual_bandwidth_update(struct amdgpu_device *adev)
163 {
164 return;
165 }
166
167 static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
168 u16 *green, u16 *blue, uint32_t size,
169 struct drm_modeset_acquire_ctx *ctx)
170 {
171 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
172 int i;
173
174 /* userspace palettes are always correct as is */
175 for (i = 0; i < size; i++) {
176 amdgpu_crtc->lut_r[i] = red[i] >> 6;
177 amdgpu_crtc->lut_g[i] = green[i] >> 6;
178 amdgpu_crtc->lut_b[i] = blue[i] >> 6;
179 }
180
181 return 0;
182 }
183
/* Tear down the CRTC and release the containing amdgpu_crtc allocation. */
static void dce_virtual_crtc_destroy(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(amdgpu_crtc);
}
191
/* CRTC callbacks; cursor ops are unsupported on the virtual display. */
static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
	.cursor_set2 = NULL,
	.cursor_move = NULL,
	.gamma_set = dce_virtual_crtc_gamma_set,
	.set_config = amdgpu_crtc_set_config,
	.destroy = dce_virtual_crtc_destroy,
	.page_flip_target = amdgpu_crtc_page_flip_target,
};
200
/*
 * DPMS for the virtual CRTC: only bookkeeping (enabled flag) and
 * software vblank state are toggled; there is no hardware to power.
 */
static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	unsigned type;

	/* under SR-IOV the host owns display state; do nothing */
	if (amdgpu_sriov_vf(adev))
		return;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		amdgpu_crtc->enabled = true;
		/* Make sure VBLANK interrupts are still enabled */
		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
		amdgpu_irq_update(adev, &adev->crtc_irq, type);
		drm_crtc_vblank_on(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_crtc_vblank_off(crtc);
		amdgpu_crtc->enabled = false;
		break;
	}
}
227
228
/* Helper prepare hook: blank the CRTC before a mode set. */
static void dce_virtual_crtc_prepare(struct drm_crtc *crtc)
{
	dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
233
/* Helper commit hook: turn the CRTC back on after a mode set. */
static void dce_virtual_crtc_commit(struct drm_crtc *crtc)
{
	dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}
238
239 static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
240 {
241 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
242
243 dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
244 if (crtc->primary->fb) {
245 int r;
246 struct amdgpu_framebuffer *amdgpu_fb;
247 struct amdgpu_bo *abo;
248
249 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
250 abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
251 r = amdgpu_bo_reserve(abo, true);
252 if (unlikely(r))
253 DRM_ERROR("failed to reserve abo before unpin\n");
254 else {
255 amdgpu_bo_unpin(abo);
256 amdgpu_bo_unreserve(abo);
257 }
258 }
259
260 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
261 amdgpu_crtc->encoder = NULL;
262 amdgpu_crtc->connector = NULL;
263 }
264
/* Record the adjusted mode on the CRTC so dpm code can read it. */
static int dce_virtual_crtc_mode_set(struct drm_crtc *crtc,
				     struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode,
				     int x, int y, struct drm_framebuffer *old_fb)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	/* update the hw mode for dpm */
	amdgpu_crtc->hw_mode = *adjusted_mode;

	return 0;
}
277
/* Any mode is acceptable on a virtual CRTC; never adjust it. */
static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	return true;
}
284
285
/* No scanout base to program; always succeed. */
static int dce_virtual_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb)
{
	return 0;
}
291
/* No-op: there is no hardware LUT to upload the cached palette into. */
static void dce_virtual_crtc_load_lut(struct drm_crtc *crtc)
{
	return;
}
296
/* Atomic variant of set_base; likewise nothing to program. */
static int dce_virtual_crtc_set_base_atomic(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y, enum mode_set_atomic state)
{
	return 0;
}
303
/* CRTC helper callbacks used by the legacy modeset helpers. */
static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = {
	.dpms = dce_virtual_crtc_dpms,
	.mode_fixup = dce_virtual_crtc_mode_fixup,
	.mode_set = dce_virtual_crtc_mode_set,
	.mode_set_base = dce_virtual_crtc_set_base,
	.mode_set_base_atomic = dce_virtual_crtc_set_base_atomic,
	.prepare = dce_virtual_crtc_prepare,
	.commit = dce_virtual_crtc_commit,
	.load_lut = dce_virtual_crtc_load_lut,
	.disable = dce_virtual_crtc_disable,
};
315
316 static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
317 {
318 struct amdgpu_crtc *amdgpu_crtc;
319 int i;
320
321 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
322 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
323 if (amdgpu_crtc == NULL)
324 return -ENOMEM;
325
326 drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
327
328 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
329 amdgpu_crtc->crtc_id = index;
330 adev->mode_info.crtcs[index] = amdgpu_crtc;
331
332 for (i = 0; i < 256; i++) {
333 amdgpu_crtc->lut_r[i] = i << 2;
334 amdgpu_crtc->lut_g[i] = i << 2;
335 amdgpu_crtc->lut_b[i] = i << 2;
336 }
337
338 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
339 amdgpu_crtc->encoder = NULL;
340 amdgpu_crtc->connector = NULL;
341 amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
342 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
343
344 return 0;
345 }
346
/* IP early init: install the virtual display/irq callbacks. */
static int dce_virtual_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dce_virtual_set_display_funcs(adev);
	dce_virtual_set_irq_funcs(adev);

	adev->mode_info.num_hpd = 1;
	adev->mode_info.num_dig = 1;
	return 0;
}
358
359 static struct drm_encoder *
360 dce_virtual_encoder(struct drm_connector *connector)
361 {
362 int enc_id = connector->encoder_ids[0];
363 struct drm_encoder *encoder;
364 int i;
365
366 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
367 if (connector->encoder_ids[i] == 0)
368 break;
369
370 encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
371 if (!encoder)
372 continue;
373
374 if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
375 return encoder;
376 }
377
378 /* pick the first one */
379 if (enc_id)
380 return drm_encoder_find(connector->dev, enc_id);
381 return NULL;
382 }
383
384 static int dce_virtual_get_modes(struct drm_connector *connector)
385 {
386 struct drm_device *dev = connector->dev;
387 struct drm_display_mode *mode = NULL;
388 unsigned i;
389 static const struct mode_size {
390 int w;
391 int h;
392 } common_modes[17] = {
393 { 640, 480},
394 { 720, 480},
395 { 800, 600},
396 { 848, 480},
397 {1024, 768},
398 {1152, 768},
399 {1280, 720},
400 {1280, 800},
401 {1280, 854},
402 {1280, 960},
403 {1280, 1024},
404 {1440, 900},
405 {1400, 1050},
406 {1680, 1050},
407 {1600, 1200},
408 {1920, 1080},
409 {1920, 1200}
410 };
411
412 for (i = 0; i < 17; i++) {
413 mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
414 drm_mode_probed_add(connector, mode);
415 }
416
417 return 0;
418 }
419
/* Every mode is valid on a virtual connector. */
static int dce_virtual_mode_valid(struct drm_connector *connector,
				  struct drm_display_mode *mode)
{
	return MODE_OK;
}
425
/* Connector DPMS is a no-op for virtual display. */
static int
dce_virtual_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}
431
/* Accept (and ignore) any connector property change. */
static int
dce_virtual_set_property(struct drm_connector *connector,
			 struct drm_property *property,
			 uint64_t val)
{
	return 0;
}
439
/* Unregister, tear down, and free the virtual connector. */
static void dce_virtual_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
446
/* No-op: forced connector state needs no hardware handling here. */
static void dce_virtual_force(struct drm_connector *connector)
{
	return;
}
451
/* Connector helper callbacks for mode probing. */
static const struct drm_connector_helper_funcs dce_virtual_connector_helper_funcs = {
	.get_modes = dce_virtual_get_modes,
	.mode_valid = dce_virtual_mode_valid,
	.best_encoder = dce_virtual_encoder,
};
457
/* Core connector callbacks. */
static const struct drm_connector_funcs dce_virtual_connector_funcs = {
	.dpms = dce_virtual_dpms,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = dce_virtual_set_property,
	.destroy = dce_virtual_destroy,
	.force = dce_virtual_force,
};
465
466 static int dce_virtual_sw_init(void *handle)
467 {
468 int r, i;
469 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
470
471 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 229, &adev->crtc_irq);
472 if (r)
473 return r;
474
475 adev->ddev->max_vblank_count = 0;
476
477 adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
478
479 adev->ddev->mode_config.max_width = 16384;
480 adev->ddev->mode_config.max_height = 16384;
481
482 adev->ddev->mode_config.preferred_depth = 24;
483 adev->ddev->mode_config.prefer_shadow = 1;
484
485 adev->ddev->mode_config.fb_base = adev->mc.aper_base;
486
487 r = amdgpu_modeset_create_props(adev);
488 if (r)
489 return r;
490
491 adev->ddev->mode_config.max_width = 16384;
492 adev->ddev->mode_config.max_height = 16384;
493
494 /* allocate crtcs, encoders, connectors */
495 for (i = 0; i < adev->mode_info.num_crtc; i++) {
496 r = dce_virtual_crtc_init(adev, i);
497 if (r)
498 return r;
499 r = dce_virtual_connector_encoder_init(adev, i);
500 if (r)
501 return r;
502 }
503
504 drm_kms_helper_poll_init(adev->ddev);
505
506 adev->mode_info.mode_config_initialized = true;
507 return 0;
508 }
509
/* IP software fini: undo dce_virtual_sw_init(). */
static int dce_virtual_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->mode_info.bios_hardcoded_edid);

	drm_kms_helper_poll_fini(adev->ddev);

	drm_mode_config_cleanup(adev->ddev);
	adev->mode_info.mode_config_initialized = false;
	return 0;
}
522
/* No hardware to initialize for virtual display. */
static int dce_virtual_hw_init(void *handle)
{
	return 0;
}
527
/* No hardware to tear down for virtual display. */
static int dce_virtual_hw_fini(void *handle)
{
	return 0;
}
532
/* Suspend is just hw_fini (a no-op). */
static int dce_virtual_suspend(void *handle)
{
	return dce_virtual_hw_fini(handle);
}
537
/* Resume is just hw_init (a no-op). */
static int dce_virtual_resume(void *handle)
{
	return dce_virtual_hw_init(handle);
}
542
/* Virtual display is always idle. */
static bool dce_virtual_is_idle(void *handle)
{
	return true;
}
547
/* Nothing to wait for; immediately idle. */
static int dce_virtual_wait_for_idle(void *handle)
{
	return 0;
}
552
/* No hardware state to reset. */
static int dce_virtual_soft_reset(void *handle)
{
	return 0;
}
557
/* No clockgating on a virtual block. */
static int dce_virtual_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}
563
/* No powergating on a virtual block. */
static int dce_virtual_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
569
/* IP-block callback table for the virtual DCE. */
static const struct amd_ip_funcs dce_virtual_ip_funcs = {
	.name = "dce_virtual",
	.early_init = dce_virtual_early_init,
	.late_init = NULL,
	.sw_init = dce_virtual_sw_init,
	.sw_fini = dce_virtual_sw_fini,
	.hw_init = dce_virtual_hw_init,
	.hw_fini = dce_virtual_hw_fini,
	.suspend = dce_virtual_suspend,
	.resume = dce_virtual_resume,
	.is_idle = dce_virtual_is_idle,
	.wait_for_idle = dce_virtual_wait_for_idle,
	.soft_reset = dce_virtual_soft_reset,
	.set_clockgating_state = dce_virtual_set_clockgating_state,
	.set_powergating_state = dce_virtual_set_powergating_state,
};
586
/* these are handled by the primary encoders */
static void dce_virtual_encoder_prepare(struct drm_encoder *encoder)
{
	return;
}
592
/* No-op: nothing to commit for a virtual encoder. */
static void dce_virtual_encoder_commit(struct drm_encoder *encoder)
{
	return;
}
597
/* No-op: nothing to program on a virtual encoder mode set. */
static void
dce_virtual_encoder_mode_set(struct drm_encoder *encoder,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode)
{
	return;
}
605
/* No-op: nothing to disable on a virtual encoder. */
static void dce_virtual_encoder_disable(struct drm_encoder *encoder)
{
	return;
}
610
/* No-op: encoder DPMS is meaningless without hardware. */
static void
dce_virtual_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	return;
}
616
/* Accept every mode unchanged. */
static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
					   const struct drm_display_mode *mode,
					   struct drm_display_mode *adjusted_mode)
{
	return true;
}
623
/* Encoder helper callbacks (all no-ops). */
static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = {
	.dpms = dce_virtual_encoder_dpms,
	.mode_fixup = dce_virtual_encoder_mode_fixup,
	.prepare = dce_virtual_encoder_prepare,
	.mode_set = dce_virtual_encoder_mode_set,
	.commit = dce_virtual_encoder_commit,
	.disable = dce_virtual_encoder_disable,
};
632
/* Tear down and free the virtual encoder. */
static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
638
/* Core encoder callbacks. */
static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
	.destroy = dce_virtual_encoder_destroy,
};
642
643 static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
644 int index)
645 {
646 struct drm_encoder *encoder;
647 struct drm_connector *connector;
648
649 /* add a new encoder */
650 encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
651 if (!encoder)
652 return -ENOMEM;
653 encoder->possible_crtcs = 1 << index;
654 drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs,
655 DRM_MODE_ENCODER_VIRTUAL, NULL);
656 drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
657
658 connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
659 if (!connector) {
660 kfree(encoder);
661 return -ENOMEM;
662 }
663
664 /* add a new connector */
665 drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs,
666 DRM_MODE_CONNECTOR_VIRTUAL);
667 drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
668 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
669 connector->interlace_allowed = false;
670 connector->doublescan_allowed = false;
671 drm_connector_register(connector);
672
673 /* link them */
674 drm_mode_connector_attach_encoder(connector, encoder);
675
676 return 0;
677 }
678
/* amdgpu display callback table; unsupported ops are NULL. */
static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
	.set_vga_render_state = &dce_virtual_set_vga_render_state,
	.bandwidth_update = &dce_virtual_bandwidth_update,
	.vblank_get_counter = &dce_virtual_vblank_get_counter,
	.vblank_wait = &dce_virtual_vblank_wait,
	.backlight_set_level = NULL,
	.backlight_get_level = NULL,
	.hpd_sense = &dce_virtual_hpd_sense,
	.hpd_set_polarity = &dce_virtual_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
	.page_flip = &dce_virtual_page_flip,
	.page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
	.add_encoder = NULL,
	.add_connector = NULL,
	.stop_mc_access = &dce_virtual_stop_mc_access,
	.resume_mc_access = &dce_virtual_resume_mc_access,
};
696
/* Install the display callbacks unless already set by another block. */
static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dce_virtual_display_funcs;
}
702
703 static int dce_virtual_pageflip(struct amdgpu_device *adev,
704 unsigned crtc_id)
705 {
706 unsigned long flags;
707 struct amdgpu_crtc *amdgpu_crtc;
708 struct amdgpu_flip_work *works;
709
710 amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
711
712 if (crtc_id >= adev->mode_info.num_crtc) {
713 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
714 return -EINVAL;
715 }
716
717 /* IRQ could occur when in initial stage */
718 if (amdgpu_crtc == NULL)
719 return 0;
720
721 spin_lock_irqsave(&adev->ddev->event_lock, flags);
722 works = amdgpu_crtc->pflip_works;
723 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
724 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
725 "AMDGPU_FLIP_SUBMITTED(%d)\n",
726 amdgpu_crtc->pflip_status,
727 AMDGPU_FLIP_SUBMITTED);
728 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
729 return 0;
730 }
731
732 /* page flip completed. clean up */
733 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
734 amdgpu_crtc->pflip_works = NULL;
735
736 /* wakeup usersapce */
737 if (works->event)
738 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
739
740 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
741
742 drm_crtc_vblank_put(&amdgpu_crtc->base);
743 schedule_work(&works->unpin_work);
744
745 return 0;
746 }
747
/*
 * Software vblank: fire the vblank event, complete any pending flip,
 * then re-arm the timer for the next period.
 *
 * NOTE(review): the timer is re-armed manually via hrtimer_start()
 * and HRTIMER_NORESTART is returned, rather than using
 * hrtimer_forward()/HRTIMER_RESTART — confirm this is intentional.
 */
static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
{
	struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer,
						       struct amdgpu_crtc, vblank_timer);
	struct drm_device *ddev = amdgpu_crtc->base.dev;
	struct amdgpu_device *adev = ddev->dev_private;

	drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
	dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
	hrtimer_start(vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD,
		      HRTIMER_MODE_REL);

	return HRTIMER_NORESTART;
}
762
/*
 * Enable/disable the per-CRTC software vsync hrtimer that emulates
 * vblank interrupts.  Only transitions (off->on, on->off) touch the
 * timer; the current state is tracked in vsync_timer_enabled.
 */
static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
							int crtc,
							enum amdgpu_interrupt_state state)
{
	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	if (state && !adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
		DRM_DEBUG("Enable software vsync timer\n");
		hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
				    DCE_VIRTUAL_VBLANK_PERIOD);
		adev->mode_info.crtcs[crtc]->vblank_timer.function =
			dce_virtual_vblank_timer_handle;
		hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
			      DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL);
	} else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
		DRM_DEBUG("Disable software vsync timer\n");
		hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
	}

	adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state;
	DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
}
790
791
/*
 * IRQ-source .set callback: @type is the vblank irq type, used
 * directly as the CRTC index for the software vsync timer.
 */
static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
					  struct amdgpu_irq_src *source,
					  unsigned type,
					  enum amdgpu_interrupt_state state)
{
	if (type > AMDGPU_CRTC_IRQ_VBLANK6)
		return -EINVAL;

	dce_virtual_set_crtc_vblank_interrupt_state(adev, type, state);

	return 0;
}
804
/* CRTC irq source; no .process — vblanks come from the hrtimer. */
static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
	.set = dce_virtual_set_crtc_irq_state,
	.process = NULL,
};
809
/* Register the CRTC irq source callbacks with the device. */
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
	adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
}
815
/* Exported IP-block descriptor for the virtual DCE v1.0.0. */
const struct amdgpu_ip_block_version dce_virtual_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_virtual_ip_funcs,
};