drivers/gpu/drm/i915/intel_dpll_mgr.c
1 /*
2 * Copyright © 2006-2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "intel_drv.h"
25
26 /**
27 * DOC: Display PLLs
28 *
29 * Display PLLs used for driving outputs vary by platform. While some have
30 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
31 * from a pool. In the latter scenario, it is possible that multiple pipes
32 * share a PLL if their configurations match.
33 *
34 * This file provides an abstraction over display PLLs. The function
35 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
36 * users of a PLL are tracked and that tracking is integrated with the atomic
37 * modeset interface. During an atomic operation, a PLL can be requested for a
38 * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
39 * a previously used PLL can be released with intel_release_shared_dpll().
40 * Changes to the users are first staged in the atomic state, and then made
41 * effective by calling intel_shared_dpll_swap_state() during the atomic
42 * commit phase.
43 */
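/*
 * Illustrative sketch (assumed call sites, not code from this file) of the
 * flow described above; the exact intel_get_shared_dpll() and
 * intel_release_shared_dpll() signatures live elsewhere in i915:
 *
 *	compute phase (atomic check):
 *		pll = intel_get_shared_dpll(...);	// may share or allocate
 *		if (!pll)
 *			return -EINVAL;			// no matching/free PLL
 *		// or, when the CRTC stops using its PLL:
 *		intel_release_shared_dpll(...);
 *
 *	commit phase:
 *		intel_shared_dpll_swap_state(state);	// staged -> effective
 *		intel_prepare_shared_dpll(crtc);
 *		intel_enable_shared_dpll(crtc);
 *		...
 *		intel_disable_shared_dpll(crtc);
 */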
44
45 static void
46 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
47 struct intel_shared_dpll_state *shared_dpll)
48 {
49 enum intel_dpll_id i;
50
51 /* Copy shared dpll state */
52 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
53 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
54
55 shared_dpll[i] = pll->state;
56 }
57 }
58
59 static struct intel_shared_dpll_state *
60 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
61 {
62 struct intel_atomic_state *state = to_intel_atomic_state(s);
63
64 WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
65
66 if (!state->dpll_set) {
67 state->dpll_set = true;
68
69 intel_atomic_duplicate_dpll_state(to_i915(s->dev),
70 state->shared_dpll);
71 }
72
73 return state->shared_dpll;
74 }
75
76 /**
77 * intel_get_shared_dpll_by_id - get a DPLL given its id
78 * @dev_priv: i915 device instance
79 * @id: pll id
80 *
81 * Returns:
82 * A pointer to the DPLL with @id
83 */
84 struct intel_shared_dpll *
85 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
86 enum intel_dpll_id id)
87 {
88 return &dev_priv->shared_dplls[id];
89 }
90
91 /**
92 * intel_get_shared_dpll_id - get the id of a DPLL
93 * @dev_priv: i915 device instance
94 * @pll: the DPLL
95 *
96 * Returns:
97 * The id of @pll
98 */
99 enum intel_dpll_id
100 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
101 struct intel_shared_dpll *pll)
102 {
103         if (WARN_ON(pll < dev_priv->shared_dplls ||
104 pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
105 return -1;
106
107 return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
108 }
109
110 /* For ILK+ */
111 void assert_shared_dpll(struct drm_i915_private *dev_priv,
112 struct intel_shared_dpll *pll,
113 bool state)
114 {
115 bool cur_state;
116 struct intel_dpll_hw_state hw_state;
117
118 if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
119 return;
120
121 cur_state = pll->funcs.get_hw_state(dev_priv, pll, &hw_state);
122 I915_STATE_WARN(cur_state != state,
123 "%s assertion failure (expected %s, current %s)\n",
124 pll->name, onoff(state), onoff(cur_state));
125 }
126
127 /**
128 * intel_prepare_shared_dpll - call a dpll's prepare hook
129 * @crtc: CRTC which has a shared dpll
130 *
131 * This calls the PLL's prepare hook if it has one and if the PLL is not
132 * already enabled. The prepare hook is platform specific.
133 */
134 void intel_prepare_shared_dpll(struct intel_crtc *crtc)
135 {
136 struct drm_device *dev = crtc->base.dev;
137 struct drm_i915_private *dev_priv = to_i915(dev);
138 struct intel_shared_dpll *pll = crtc->config->shared_dpll;
139
140 if (WARN_ON(pll == NULL))
141 return;
142
143 mutex_lock(&dev_priv->dpll_lock);
144 WARN_ON(!pll->state.crtc_mask);
145 if (!pll->active_mask) {
146 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
147 WARN_ON(pll->on);
148 assert_shared_dpll_disabled(dev_priv, pll);
149
150 pll->funcs.prepare(dev_priv, pll);
151 }
152 mutex_unlock(&dev_priv->dpll_lock);
153 }
154
155 /**
156 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
157 * @crtc: CRTC which has a shared DPLL
158 *
159 * Enable the shared DPLL used by @crtc.
160 */
161 void intel_enable_shared_dpll(struct intel_crtc *crtc)
162 {
163 struct drm_device *dev = crtc->base.dev;
164 struct drm_i915_private *dev_priv = to_i915(dev);
165 struct intel_shared_dpll *pll = crtc->config->shared_dpll;
166 unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
167 unsigned old_mask;
168
169 if (WARN_ON(pll == NULL))
170 return;
171
172 mutex_lock(&dev_priv->dpll_lock);
173 old_mask = pll->active_mask;
174
175 if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
176 WARN_ON(pll->active_mask & crtc_mask))
177 goto out;
178
179 pll->active_mask |= crtc_mask;
180
181 DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
182 pll->name, pll->active_mask, pll->on,
183 crtc->base.base.id);
184
185 if (old_mask) {
186 WARN_ON(!pll->on);
187 assert_shared_dpll_enabled(dev_priv, pll);
188 goto out;
189 }
190 WARN_ON(pll->on);
191
192 DRM_DEBUG_KMS("enabling %s\n", pll->name);
193 pll->funcs.enable(dev_priv, pll);
194 pll->on = true;
195
196 out:
197 mutex_unlock(&dev_priv->dpll_lock);
198 }
199
200 /**
201 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
202 * @crtc: CRTC which has a shared DPLL
203 *
204 * Disable the shared DPLL used by @crtc.
205 */
206 void intel_disable_shared_dpll(struct intel_crtc *crtc)
207 {
208 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
209 struct intel_shared_dpll *pll = crtc->config->shared_dpll;
210 unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
211
212 /* PCH only available on ILK+ */
213 if (INTEL_GEN(dev_priv) < 5)
214 return;
215
216 if (pll == NULL)
217 return;
218
219 mutex_lock(&dev_priv->dpll_lock);
220 if (WARN_ON(!(pll->active_mask & crtc_mask)))
221 goto out;
222
223 DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
224 pll->name, pll->active_mask, pll->on,
225 crtc->base.base.id);
226
227 assert_shared_dpll_enabled(dev_priv, pll);
228 WARN_ON(!pll->on);
229
230 pll->active_mask &= ~crtc_mask;
231 if (pll->active_mask)
232 goto out;
233
234 DRM_DEBUG_KMS("disabling %s\n", pll->name);
235 pll->funcs.disable(dev_priv, pll);
236 pll->on = false;
237
238 out:
239 mutex_unlock(&dev_priv->dpll_lock);
240 }
241
242 static struct intel_shared_dpll *
243 intel_find_shared_dpll(struct intel_crtc *crtc,
244 struct intel_crtc_state *crtc_state,
245 enum intel_dpll_id range_min,
246 enum intel_dpll_id range_max)
247 {
248 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
249 struct intel_shared_dpll *pll;
250 struct intel_shared_dpll_state *shared_dpll;
251 enum intel_dpll_id i;
252
253 shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
254
255 for (i = range_min; i <= range_max; i++) {
256 pll = &dev_priv->shared_dplls[i];
257
258 /* Only want to check enabled timings first */
259 if (shared_dpll[i].crtc_mask == 0)
260 continue;
261
262 if (memcmp(&crtc_state->dpll_hw_state,
263 &shared_dpll[i].hw_state,
264 sizeof(crtc_state->dpll_hw_state)) == 0) {
265 DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
266 crtc->base.base.id, crtc->base.name, pll->name,
267 shared_dpll[i].crtc_mask,
268 pll->active_mask);
269 return pll;
270 }
271 }
272
273 /* Ok no matching timings, maybe there's a free one? */
274 for (i = range_min; i <= range_max; i++) {
275 pll = &dev_priv->shared_dplls[i];
276 if (shared_dpll[i].crtc_mask == 0) {
277 DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
278 crtc->base.base.id, crtc->base.name, pll->name);
279 return pll;
280 }
281 }
282
283 return NULL;
284 }
285
286 static void
287 intel_reference_shared_dpll(struct intel_shared_dpll *pll,
288 struct intel_crtc_state *crtc_state)
289 {
290 struct intel_shared_dpll_state *shared_dpll;
291 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
292 enum intel_dpll_id i = pll->id;
293
294 shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
295
296 if (shared_dpll[i].crtc_mask == 0)
297 shared_dpll[i].hw_state =
298 crtc_state->dpll_hw_state;
299
300 crtc_state->shared_dpll = pll;
301 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
302 pipe_name(crtc->pipe));
303
304 shared_dpll[pll->id].crtc_mask |= 1 << crtc->pipe;
305 }
306
307 /**
308 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
309 * @state: atomic state
310 *
311 * This is the dpll version of drm_atomic_helper_swap_state() since the
312 * helper does not handle driver-specific global state.
313 *
314 * For consistency with atomic helpers this function does a complete swap,
315 * i.e. it also puts the current state into @state, even though there is no
316 * need for that at this moment.
317 */
318 void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
319 {
320 struct drm_i915_private *dev_priv = to_i915(state->dev);
321 struct intel_shared_dpll_state *shared_dpll;
322 struct intel_shared_dpll *pll;
323 enum intel_dpll_id i;
324
325 if (!to_intel_atomic_state(state)->dpll_set)
326 return;
327
328 shared_dpll = to_intel_atomic_state(state)->shared_dpll;
329 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
330 struct intel_shared_dpll_state tmp;
331
332 pll = &dev_priv->shared_dplls[i];
333
334 tmp = pll->state;
335 pll->state = shared_dpll[i];
336 shared_dpll[i] = tmp;
337 }
338 }
339
340 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
341 struct intel_shared_dpll *pll,
342 struct intel_dpll_hw_state *hw_state)
343 {
344 uint32_t val;
345
346 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
347 return false;
348
349 val = I915_READ(PCH_DPLL(pll->id));
350 hw_state->dpll = val;
351 hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
352 hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
353
354 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
355
356 return val & DPLL_VCO_ENABLE;
357 }
358
359 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
360 struct intel_shared_dpll *pll)
361 {
362 I915_WRITE(PCH_FP0(pll->id), pll->state.hw_state.fp0);
363 I915_WRITE(PCH_FP1(pll->id), pll->state.hw_state.fp1);
364 }
365
366 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
367 {
368 u32 val;
369 bool enabled;
370
371 I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
372
373 val = I915_READ(PCH_DREF_CONTROL);
374 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
375 DREF_SUPERSPREAD_SOURCE_MASK));
376 I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
377 }
378
379 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
380 struct intel_shared_dpll *pll)
381 {
382 /* PCH refclock must be enabled first */
383 ibx_assert_pch_refclk_enabled(dev_priv);
384
385 I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
386
387 /* Wait for the clocks to stabilize. */
388 POSTING_READ(PCH_DPLL(pll->id));
389 udelay(150);
390
391 /* The pixel multiplier can only be updated once the
392 * DPLL is enabled and the clocks are stable.
393 *
394 * So write it again.
395 */
396 I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
397 POSTING_READ(PCH_DPLL(pll->id));
398 udelay(200);
399 }
400
401 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
402 struct intel_shared_dpll *pll)
403 {
404 struct drm_device *dev = &dev_priv->drm;
405 struct intel_crtc *crtc;
406
407         /* Make sure no transcoder is still depending on us. */
408 for_each_intel_crtc(dev, crtc) {
409 if (crtc->config->shared_dpll == pll)
410 assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
411 }
412
413 I915_WRITE(PCH_DPLL(pll->id), 0);
414 POSTING_READ(PCH_DPLL(pll->id));
415 udelay(200);
416 }
417
418 static struct intel_shared_dpll *
419 ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
420 struct intel_encoder *encoder)
421 {
422 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
423 struct intel_shared_dpll *pll;
424 enum intel_dpll_id i;
425
426 if (HAS_PCH_IBX(dev_priv)) {
427 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
428 i = (enum intel_dpll_id) crtc->pipe;
429 pll = &dev_priv->shared_dplls[i];
430
431 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
432 crtc->base.base.id, crtc->base.name, pll->name);
433 } else {
434 pll = intel_find_shared_dpll(crtc, crtc_state,
435 DPLL_ID_PCH_PLL_A,
436 DPLL_ID_PCH_PLL_B);
437 }
438
439 if (!pll)
440 return NULL;
441
442 /* reference the pll */
443 intel_reference_shared_dpll(pll, crtc_state);
444
445 return pll;
446 }
447
448 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
449 struct intel_dpll_hw_state *hw_state)
450 {
451 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
452 "fp0: 0x%x, fp1: 0x%x\n",
453 hw_state->dpll,
454 hw_state->dpll_md,
455 hw_state->fp0,
456 hw_state->fp1);
457 }
458
459 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
460 .prepare = ibx_pch_dpll_prepare,
461 .enable = ibx_pch_dpll_enable,
462 .disable = ibx_pch_dpll_disable,
463 .get_hw_state = ibx_pch_dpll_get_hw_state,
464 };
465
466 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
467 struct intel_shared_dpll *pll)
468 {
469 I915_WRITE(WRPLL_CTL(pll->id), pll->state.hw_state.wrpll);
470 POSTING_READ(WRPLL_CTL(pll->id));
471 udelay(20);
472 }
473
474 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
475 struct intel_shared_dpll *pll)
476 {
477 I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
478 POSTING_READ(SPLL_CTL);
479 udelay(20);
480 }
481
482 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
483 struct intel_shared_dpll *pll)
484 {
485 uint32_t val;
486
487 val = I915_READ(WRPLL_CTL(pll->id));
488 I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
489 POSTING_READ(WRPLL_CTL(pll->id));
490 }
491
492 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
493 struct intel_shared_dpll *pll)
494 {
495 uint32_t val;
496
497 val = I915_READ(SPLL_CTL);
498 I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
499 POSTING_READ(SPLL_CTL);
500 }
501
502 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
503 struct intel_shared_dpll *pll,
504 struct intel_dpll_hw_state *hw_state)
505 {
506 uint32_t val;
507
508 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
509 return false;
510
511 val = I915_READ(WRPLL_CTL(pll->id));
512 hw_state->wrpll = val;
513
514 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
515
516 return val & WRPLL_PLL_ENABLE;
517 }
518
519 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
520 struct intel_shared_dpll *pll,
521 struct intel_dpll_hw_state *hw_state)
522 {
523 uint32_t val;
524
525 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
526 return false;
527
528 val = I915_READ(SPLL_CTL);
529 hw_state->spll = val;
530
531 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
532
533 return val & SPLL_PLL_ENABLE;
534 }
535
536 #define LC_FREQ 2700
537 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
538
539 #define P_MIN 2
540 #define P_MAX 64
541 #define P_INC 2
542
543 /* Constraints for PLL good behavior */
544 #define REF_MIN 48
545 #define REF_MAX 400
546 #define VCO_MIN 2400
547 #define VCO_MAX 4800
548
549 struct hsw_wrpll_rnp {
550 unsigned p, n2, r2;
551 };
552
553 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
554 {
555 unsigned budget;
556
557 switch (clock) {
558 case 25175000:
559 case 25200000:
560 case 27000000:
561 case 27027000:
562 case 37762500:
563 case 37800000:
564 case 40500000:
565 case 40541000:
566 case 54000000:
567 case 54054000:
568 case 59341000:
569 case 59400000:
570 case 72000000:
571 case 74176000:
572 case 74250000:
573 case 81000000:
574 case 81081000:
575 case 89012000:
576 case 89100000:
577 case 108000000:
578 case 108108000:
579 case 111264000:
580 case 111375000:
581 case 148352000:
582 case 148500000:
583 case 162000000:
584 case 162162000:
585 case 222525000:
586 case 222750000:
587 case 296703000:
588 case 297000000:
589 budget = 0;
590 break;
591 case 233500000:
592 case 245250000:
593 case 247750000:
594 case 253250000:
595 case 298000000:
596 budget = 1500;
597 break;
598 case 169128000:
599 case 169500000:
600 case 179500000:
601 case 202000000:
602 budget = 2000;
603 break;
604 case 256250000:
605 case 262500000:
606 case 270000000:
607 case 272500000:
608 case 273750000:
609 case 280750000:
610 case 281250000:
611 case 286000000:
612 case 291750000:
613 budget = 4000;
614 break;
615 case 267250000:
616 case 268500000:
617 budget = 5000;
618 break;
619 default:
620 budget = 1000;
621 break;
622 }
623
624 return budget;
625 }
626
627 static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
628 unsigned r2, unsigned n2, unsigned p,
629 struct hsw_wrpll_rnp *best)
630 {
631 uint64_t a, b, c, d, diff, diff_best;
632
633 /* No best (r,n,p) yet */
634 if (best->p == 0) {
635 best->p = p;
636 best->n2 = n2;
637 best->r2 = r2;
638 return;
639 }
640
641 /*
642 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
643 * freq2k.
644 *
645 * delta = 1e6 *
646 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
647 * freq2k;
648 *
649 * and we would like delta <= budget.
650 *
651 * If the discrepancy is above the PPM-based budget, always prefer to
652 * improve upon the previous solution. However, if you're within the
653 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
654 */
655 a = freq2k * budget * p * r2;
656 b = freq2k * budget * best->p * best->r2;
657 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
658 diff_best = abs_diff(freq2k * best->p * best->r2,
659 LC_FREQ_2K * best->n2);
660 c = 1000000 * diff;
661 d = 1000000 * diff_best;
662
663 if (a < c && b < d) {
664 /* If both are above the budget, pick the closer */
665 if (best->p * best->r2 * diff < p * r2 * diff_best) {
666 best->p = p;
667 best->n2 = n2;
668 best->r2 = r2;
669 }
670 } else if (a >= c && b < d) {
671                 /* The new candidate is within budget (a >= c) but the old best is not: update. */
672 best->p = p;
673 best->n2 = n2;
674 best->r2 = r2;
675 } else if (a >= c && b >= d) {
676 /* Both are below the limit, so pick the higher n2/(r2*r2) */
677 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
678 best->p = p;
679 best->n2 = n2;
680 best->r2 = r2;
681 }
682 }
683 /* Otherwise a < c && b >= d, do nothing */
684 }
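/*
 * The algebra behind the divisionless comparison above (a restatement, not
 * new behaviour): a candidate's error in ppm is
 *
 *	delta = 1e6 * abs(freq2k - LC_FREQ_2K * n2 / (p * r2)) / freq2k
 *
 * and multiplying both sides of "delta <= budget" by the positive quantity
 * freq2k * p * r2 gives
 *
 *	1e6 * abs(freq2k * p * r2 - LC_FREQ_2K * n2) <= budget * freq2k * p * r2
 *	                        c                    <=            a
 *
 * so "a >= c" means the new (r2, n2, p) is within budget, and likewise
 * "b >= d" for the current best.
 */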
685
686 static void
687 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
688 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
689 {
690 uint64_t freq2k;
691 unsigned p, n2, r2;
692 struct hsw_wrpll_rnp best = { 0, 0, 0 };
693 unsigned budget;
694
695 freq2k = clock / 100;
696
697 budget = hsw_wrpll_get_budget_for_freq(clock);
698
699         /* Special case handling for a 540 MHz pixel clock: bypass the WR PLL entirely
700 * and directly pass the LC PLL to it. */
701 if (freq2k == 5400000) {
702 *n2_out = 2;
703 *p_out = 1;
704 *r2_out = 2;
705 return;
706 }
707
708 /*
709 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
710 * the WR PLL.
711 *
712 * We want R so that REF_MIN <= Ref <= REF_MAX.
713 * Injecting R2 = 2 * R gives:
714 * REF_MAX * r2 > LC_FREQ * 2 and
715 * REF_MIN * r2 < LC_FREQ * 2
716 *
717 * Which means the desired boundaries for r2 are:
718 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
719 *
720 */
721 for (r2 = LC_FREQ * 2 / REF_MAX + 1;
722 r2 <= LC_FREQ * 2 / REF_MIN;
723 r2++) {
724
725 /*
726 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
727 *
728 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
729 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
730 * VCO_MAX * r2 > n2 * LC_FREQ and
731 * VCO_MIN * r2 < n2 * LC_FREQ)
732 *
733 * Which means the desired boundaries for n2 are:
734 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
735 */
736 for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
737 n2 <= VCO_MAX * r2 / LC_FREQ;
738 n2++) {
739
740 for (p = P_MIN; p <= P_MAX; p += P_INC)
741 hsw_wrpll_update_rnp(freq2k, budget,
742 r2, n2, p, &best);
743 }
744 }
745
746 *n2_out = best.n2;
747 *p_out = best.p;
748 *r2_out = best.r2;
749 }
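/*
 * Worked example of the ranges above with the constants defined earlier
 * (LC_FREQ = 2700, REF_MIN/REF_MAX = 48/400, VCO_MIN/VCO_MAX = 2400/4800,
 * integer division throughout):
 *
 *	r2 runs from LC_FREQ * 2 / REF_MAX + 1 = 5400 / 400 + 1 = 14
 *	          to LC_FREQ * 2 / REF_MIN     = 5400 / 48      = 112
 *
 *	for r2 = 14, n2 runs from VCO_MIN * 14 / LC_FREQ + 1 = 13
 *	                       to VCO_MAX * 14 / LC_FREQ     = 24
 *
 * and p covers 2..64 in steps of 2, with every (r2, n2, p) triple fed to
 * hsw_wrpll_update_rnp().
 */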
750
751 static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
752 struct intel_crtc *crtc,
753 struct intel_crtc_state *crtc_state)
754 {
755 struct intel_shared_dpll *pll;
756 uint32_t val;
757 unsigned int p, n2, r2;
758
759 hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
760
761 val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
762 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
763 WRPLL_DIVIDER_POST(p);
764
765 crtc_state->dpll_hw_state.wrpll = val;
766
767 pll = intel_find_shared_dpll(crtc, crtc_state,
768 DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
769
770 if (!pll)
771 return NULL;
772
773 return pll;
774 }
775
776 static struct intel_shared_dpll *
777 hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock)
778 {
779 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
780 struct intel_shared_dpll *pll;
781 enum intel_dpll_id pll_id;
782
783 switch (clock / 2) {
784 case 81000:
785 pll_id = DPLL_ID_LCPLL_810;
786 break;
787 case 135000:
788 pll_id = DPLL_ID_LCPLL_1350;
789 break;
790 case 270000:
791 pll_id = DPLL_ID_LCPLL_2700;
792 break;
793 default:
794 DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
795 return NULL;
796 }
797
798 pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
799
800 if (!pll)
801 return NULL;
802
803 return pll;
804 }
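/*
 * Note on the switch above: clock is the DP port_clock in kHz, so
 * clock / 2 = 81000, 135000 and 270000 select DPLL_ID_LCPLL_810,
 * DPLL_ID_LCPLL_1350 and DPLL_ID_LCPLL_2700 for port clocks of 162000,
 * 270000 and 540000 respectively; presumably the LCPLL runs at half the
 * link bit rate (810/1350/2700 MHz for 1.62/2.7/5.4 Gbps). Only the
 * halving itself is taken from the code here.
 */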
805
806 static struct intel_shared_dpll *
807 hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
808 struct intel_encoder *encoder)
809 {
810 struct intel_shared_dpll *pll;
811 int clock = crtc_state->port_clock;
812
813 memset(&crtc_state->dpll_hw_state, 0,
814 sizeof(crtc_state->dpll_hw_state));
815
816 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
817 pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);
818 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
819 pll = hsw_ddi_dp_get_dpll(encoder, clock);
820 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
821 if (WARN_ON(crtc_state->port_clock / 2 != 135000))
822 return NULL;
823
824 crtc_state->dpll_hw_state.spll =
825 SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
826
827 pll = intel_find_shared_dpll(crtc, crtc_state,
828 DPLL_ID_SPLL, DPLL_ID_SPLL);
829 } else {
830 return NULL;
831 }
832
833 if (!pll)
834 return NULL;
835
836 intel_reference_shared_dpll(pll, crtc_state);
837
838 return pll;
839 }
840
841 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
842 struct intel_dpll_hw_state *hw_state)
843 {
844 DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
845 hw_state->wrpll, hw_state->spll);
846 }
847
848 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
849 .enable = hsw_ddi_wrpll_enable,
850 .disable = hsw_ddi_wrpll_disable,
851 .get_hw_state = hsw_ddi_wrpll_get_hw_state,
852 };
853
854 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
855 .enable = hsw_ddi_spll_enable,
856 .disable = hsw_ddi_spll_disable,
857 .get_hw_state = hsw_ddi_spll_get_hw_state,
858 };
859
860 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
861 struct intel_shared_dpll *pll)
862 {
863 }
864
865 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
866 struct intel_shared_dpll *pll)
867 {
868 }
869
870 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
871 struct intel_shared_dpll *pll,
872 struct intel_dpll_hw_state *hw_state)
873 {
874 return true;
875 }
876
877 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
878 .enable = hsw_ddi_lcpll_enable,
879 .disable = hsw_ddi_lcpll_disable,
880 .get_hw_state = hsw_ddi_lcpll_get_hw_state,
881 };
882
883 struct skl_dpll_regs {
884 i915_reg_t ctl, cfgcr1, cfgcr2;
885 };
886
887 /* this array is indexed by the *shared* pll id */
888 static const struct skl_dpll_regs skl_dpll_regs[4] = {
889 {
890 /* DPLL 0 */
891 .ctl = LCPLL1_CTL,
892 /* DPLL 0 doesn't support HDMI mode */
893 },
894 {
895 /* DPLL 1 */
896 .ctl = LCPLL2_CTL,
897 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
898 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
899 },
900 {
901 /* DPLL 2 */
902 .ctl = WRPLL_CTL(0),
903 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
904 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
905 },
906 {
907 /* DPLL 3 */
908 .ctl = WRPLL_CTL(1),
909 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
910 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
911 },
912 };
913
914 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
915 struct intel_shared_dpll *pll)
916 {
917 uint32_t val;
918
919 val = I915_READ(DPLL_CTRL1);
920
921 val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) |
922 DPLL_CTRL1_LINK_RATE_MASK(pll->id));
923 val |= pll->state.hw_state.ctrl1 << (pll->id * 6);
924
925 I915_WRITE(DPLL_CTRL1, val);
926 POSTING_READ(DPLL_CTRL1);
927 }
928
929 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
930 struct intel_shared_dpll *pll)
931 {
932 const struct skl_dpll_regs *regs = skl_dpll_regs;
933
934 skl_ddi_pll_write_ctrl1(dev_priv, pll);
935
936 I915_WRITE(regs[pll->id].cfgcr1, pll->state.hw_state.cfgcr1);
937 I915_WRITE(regs[pll->id].cfgcr2, pll->state.hw_state.cfgcr2);
938 POSTING_READ(regs[pll->id].cfgcr1);
939 POSTING_READ(regs[pll->id].cfgcr2);
940
941 /* the enable bit is always bit 31 */
942 I915_WRITE(regs[pll->id].ctl,
943 I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
944
945 if (intel_wait_for_register(dev_priv,
946 DPLL_STATUS,
947 DPLL_LOCK(pll->id),
948 DPLL_LOCK(pll->id),
949 5))
950 DRM_ERROR("DPLL %d not locked\n", pll->id);
951 }
952
953 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
954 struct intel_shared_dpll *pll)
955 {
956 skl_ddi_pll_write_ctrl1(dev_priv, pll);
957 }
958
959 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
960 struct intel_shared_dpll *pll)
961 {
962 const struct skl_dpll_regs *regs = skl_dpll_regs;
963
964 /* the enable bit is always bit 31 */
965 I915_WRITE(regs[pll->id].ctl,
966 I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
967 POSTING_READ(regs[pll->id].ctl);
968 }
969
970 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
971 struct intel_shared_dpll *pll)
972 {
973 }
974
975 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
976 struct intel_shared_dpll *pll,
977 struct intel_dpll_hw_state *hw_state)
978 {
979 uint32_t val;
980 const struct skl_dpll_regs *regs = skl_dpll_regs;
981 bool ret;
982
983 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
984 return false;
985
986 ret = false;
987
988 val = I915_READ(regs[pll->id].ctl);
989 if (!(val & LCPLL_PLL_ENABLE))
990 goto out;
991
992 val = I915_READ(DPLL_CTRL1);
993 hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;
994
995 /* avoid reading back stale values if HDMI mode is not enabled */
996 if (val & DPLL_CTRL1_HDMI_MODE(pll->id)) {
997 hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
998 hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
999 }
1000 ret = true;
1001
1002 out:
1003 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1004
1005 return ret;
1006 }
1007
1008 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1009 struct intel_shared_dpll *pll,
1010 struct intel_dpll_hw_state *hw_state)
1011 {
1012 uint32_t val;
1013 const struct skl_dpll_regs *regs = skl_dpll_regs;
1014 bool ret;
1015
1016 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
1017 return false;
1018
1019 ret = false;
1020
1021 /* DPLL0 is always enabled since it drives CDCLK */
1022 val = I915_READ(regs[pll->id].ctl);
1023 if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
1024 goto out;
1025
1026 val = I915_READ(DPLL_CTRL1);
1027 hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;
1028
1029 ret = true;
1030
1031 out:
1032 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1033
1034 return ret;
1035 }
1036
1037 struct skl_wrpll_context {
1038 uint64_t min_deviation; /* current minimal deviation */
1039 uint64_t central_freq; /* chosen central freq */
1040 uint64_t dco_freq; /* chosen dco freq */
1041 unsigned int p; /* chosen divider */
1042 };
1043
1044 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1045 {
1046 memset(ctx, 0, sizeof(*ctx));
1047
1048 ctx->min_deviation = U64_MAX;
1049 }
1050
1051 /* DCO freq must be within +1%/-6% of the DCO central freq */
1052 #define SKL_DCO_MAX_PDEVIATION 100
1053 #define SKL_DCO_MAX_NDEVIATION 600
1054
1055 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1056 uint64_t central_freq,
1057 uint64_t dco_freq,
1058 unsigned int divider)
1059 {
1060 uint64_t deviation;
1061
1062 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1063 central_freq);
1064
1065 /* positive deviation */
1066 if (dco_freq >= central_freq) {
1067 if (deviation < SKL_DCO_MAX_PDEVIATION &&
1068 deviation < ctx->min_deviation) {
1069 ctx->min_deviation = deviation;
1070 ctx->central_freq = central_freq;
1071 ctx->dco_freq = dco_freq;
1072 ctx->p = divider;
1073 }
1074 /* negative deviation */
1075 } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1076 deviation < ctx->min_deviation) {
1077 ctx->min_deviation = deviation;
1078 ctx->central_freq = central_freq;
1079 ctx->dco_freq = dco_freq;
1080 ctx->p = divider;
1081 }
1082 }
1083
1084 static void skl_wrpll_get_multipliers(unsigned int p,
1085 unsigned int *p0 /* out */,
1086 unsigned int *p1 /* out */,
1087 unsigned int *p2 /* out */)
1088 {
1089 /* even dividers */
1090 if (p % 2 == 0) {
1091 unsigned int half = p / 2;
1092
1093 if (half == 1 || half == 2 || half == 3 || half == 5) {
1094 *p0 = 2;
1095 *p1 = 1;
1096 *p2 = half;
1097 } else if (half % 2 == 0) {
1098 *p0 = 2;
1099 *p1 = half / 2;
1100 *p2 = 2;
1101 } else if (half % 3 == 0) {
1102 *p0 = 3;
1103 *p1 = half / 3;
1104 *p2 = 2;
1105 } else if (half % 7 == 0) {
1106 *p0 = 7;
1107 *p1 = half / 7;
1108 *p2 = 2;
1109 }
1110 } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
1111 *p0 = 3;
1112 *p1 = 1;
1113 *p2 = p / 3;
1114 } else if (p == 5 || p == 7) {
1115 *p0 = p;
1116 *p1 = 1;
1117 *p2 = 1;
1118 } else if (p == 15) {
1119 *p0 = 3;
1120 *p1 = 1;
1121 *p2 = 5;
1122 } else if (p == 21) {
1123 *p0 = 7;
1124 *p1 = 1;
1125 *p2 = 3;
1126 } else if (p == 35) {
1127 *p0 = 7;
1128 *p1 = 1;
1129 *p2 = 5;
1130 }
1131 }
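/*
 * Quick checks of the decomposition above; for every divider in the
 * even/odd lists used by skl_ddi_calculate_wrpll(), p0 * p1 * p2 == p:
 *
 *	p = 20: half = 10, 10 % 2 == 0 -> p0 = 2, p1 = 5, p2 = 2 (2*5*2 = 20)
 *	p = 98: half = 49, 49 % 7 == 0 -> p0 = 7, p1 = 7, p2 = 2 (7*7*2 = 98)
 *	p = 35: odd branch             -> p0 = 7, p1 = 1, p2 = 5 (7*1*5 = 35)
 */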
1132
1133 struct skl_wrpll_params {
1134 uint32_t dco_fraction;
1135 uint32_t dco_integer;
1136 uint32_t qdiv_ratio;
1137 uint32_t qdiv_mode;
1138 uint32_t kdiv;
1139 uint32_t pdiv;
1140 uint32_t central_freq;
1141 };
1142
1143 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1144 uint64_t afe_clock,
1145 uint64_t central_freq,
1146 uint32_t p0, uint32_t p1, uint32_t p2)
1147 {
1148 uint64_t dco_freq;
1149
1150 switch (central_freq) {
1151 case 9600000000ULL:
1152 params->central_freq = 0;
1153 break;
1154 case 9000000000ULL:
1155 params->central_freq = 1;
1156 break;
1157 case 8400000000ULL:
1158 params->central_freq = 3;
1159 }
1160
1161 switch (p0) {
1162 case 1:
1163 params->pdiv = 0;
1164 break;
1165 case 2:
1166 params->pdiv = 1;
1167 break;
1168 case 3:
1169 params->pdiv = 2;
1170 break;
1171 case 7:
1172 params->pdiv = 4;
1173 break;
1174 default:
1175 WARN(1, "Incorrect PDiv\n");
1176 }
1177
1178 switch (p2) {
1179 case 5:
1180 params->kdiv = 0;
1181 break;
1182 case 2:
1183 params->kdiv = 1;
1184 break;
1185 case 3:
1186 params->kdiv = 2;
1187 break;
1188 case 1:
1189 params->kdiv = 3;
1190 break;
1191 default:
1192 WARN(1, "Incorrect KDiv\n");
1193 }
1194
1195 params->qdiv_ratio = p1;
1196 params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1197
1198 dco_freq = p0 * p1 * p2 * afe_clock;
1199
1200 /*
1201 * Intermediate values are in Hz.
1202          * Divide by MHz to match bspec
1203 */
1204 params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
1205 params->dco_fraction =
1206 div_u64((div_u64(dco_freq, 24) -
1207 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1208 }
1209
1210 static bool
1211 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1212 struct skl_wrpll_params *wrpll_params)
1213 {
1214 uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1215 uint64_t dco_central_freq[3] = {8400000000ULL,
1216 9000000000ULL,
1217 9600000000ULL};
1218 static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
1219 24, 28, 30, 32, 36, 40, 42, 44,
1220 48, 52, 54, 56, 60, 64, 66, 68,
1221 70, 72, 76, 78, 80, 84, 88, 90,
1222 92, 96, 98 };
1223 static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1224 static const struct {
1225 const int *list;
1226 int n_dividers;
1227 } dividers[] = {
1228 { even_dividers, ARRAY_SIZE(even_dividers) },
1229 { odd_dividers, ARRAY_SIZE(odd_dividers) },
1230 };
1231 struct skl_wrpll_context ctx;
1232 unsigned int dco, d, i;
1233 unsigned int p0, p1, p2;
1234
1235 skl_wrpll_context_init(&ctx);
1236
1237 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1238 for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1239 for (i = 0; i < dividers[d].n_dividers; i++) {
1240 unsigned int p = dividers[d].list[i];
1241 uint64_t dco_freq = p * afe_clock;
1242
1243 skl_wrpll_try_divider(&ctx,
1244 dco_central_freq[dco],
1245 dco_freq,
1246 p);
1247 /*
1248                                  * Skip the remaining dividers if we're sure to
1249                                  * have found the definitive divider; we can't
1250                                  * improve on a 0 deviation.
1251 */
1252 if (ctx.min_deviation == 0)
1253 goto skip_remaining_dividers;
1254 }
1255 }
1256
1257 skip_remaining_dividers:
1258 /*
1259 * If a solution is found with an even divider, prefer
1260 * this one.
1261 */
1262 if (d == 0 && ctx.p)
1263 break;
1264 }
1265
1266 if (!ctx.p) {
1267 DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1268 return false;
1269 }
1270
1271 /*
1272 * gcc incorrectly analyses that these can be used without being
1273 * initialized. To be fair, it's hard to guess.
1274 */
1275 p0 = p1 = p2 = 0;
1276 skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1277 skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
1278 p0, p1, p2);
1279
1280 return true;
1281 }
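/*
 * Worked example of the search above, assuming a 148.5 MHz HDMI pixel clock
 * (clock = 148500000 Hz, afe_clock = 742500000 Hz):
 *
 *	the even divider p = 12 gives dco_freq = 12 * 742.5 MHz = 8910 MHz;
 *	against the 9000 MHz central frequency the (negative) deviation is
 *	10000 * 90 / 9000 = 100, i.e. 1%, inside the 6% negative limit and the
 *	smallest deviation any even divider achieves, so ctx.p = 12.
 *
 *	skl_wrpll_get_multipliers(12, ...) yields p0 = 2, p1 = 3, p2 = 2, and
 *	skl_wrpll_params_populate() then produces pdiv = 1, kdiv = 1,
 *	qdiv_ratio = 3, qdiv_mode = 1, central_freq field = 1,
 *	dco_integer = 8910000000 / 24000000 = 371 and
 *	dco_fraction = (8910000000 / 24 - 371000000) * 0x8000 / 1000000 = 8192.
 */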
1282
1283 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
1284 struct intel_crtc_state *crtc_state,
1285 int clock)
1286 {
1287 uint32_t ctrl1, cfgcr1, cfgcr2;
1288 struct skl_wrpll_params wrpll_params = { 0, };
1289
1290 /*
1291 * See comment in intel_dpll_hw_state to understand why we always use 0
1292 * as the DPLL id in this function.
1293 */
1294 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1295
1296 ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1297
1298 if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
1299 return false;
1300
1301 cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1302 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1303 wrpll_params.dco_integer;
1304
1305 cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1306 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1307 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1308 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1309 wrpll_params.central_freq;
1310
1311 memset(&crtc_state->dpll_hw_state, 0,
1312 sizeof(crtc_state->dpll_hw_state));
1313
1314 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1315 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1316 crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1317 return true;
1318 }
1319
1320 static bool
1321 skl_ddi_dp_set_dpll_hw_state(int clock,
1322 struct intel_dpll_hw_state *dpll_hw_state)
1323 {
1324 uint32_t ctrl1;
1325
1326 /*
1327 * See comment in intel_dpll_hw_state to understand why we always use 0
1328 * as the DPLL id in this function.
1329 */
1330 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1331 switch (clock / 2) {
1332 case 81000:
1333 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1334 break;
1335 case 135000:
1336 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1337 break;
1338 case 270000:
1339 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1340 break;
1341 /* eDP 1.4 rates */
1342 case 162000:
1343 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1344 break;
1345 case 108000:
1346 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1347 break;
1348 case 216000:
1349 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1350 break;
1351 }
1352
1353 dpll_hw_state->ctrl1 = ctrl1;
1354 return true;
1355 }
1356
1357 static struct intel_shared_dpll *
1358 skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
1359 struct intel_encoder *encoder)
1360 {
1361 struct intel_shared_dpll *pll;
1362 int clock = crtc_state->port_clock;
1363 bool bret;
1364 struct intel_dpll_hw_state dpll_hw_state;
1365
1366 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
1367
1368 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1369 bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
1370 if (!bret) {
1371 DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
1372 return NULL;
1373 }
1374 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
1375 bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
1376 if (!bret) {
1377 DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
1378 return NULL;
1379 }
1380 crtc_state->dpll_hw_state = dpll_hw_state;
1381 } else {
1382 return NULL;
1383 }
1384
1385 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1386 pll = intel_find_shared_dpll(crtc, crtc_state,
1387 DPLL_ID_SKL_DPLL0,
1388 DPLL_ID_SKL_DPLL0);
1389 else
1390 pll = intel_find_shared_dpll(crtc, crtc_state,
1391 DPLL_ID_SKL_DPLL1,
1392 DPLL_ID_SKL_DPLL3);
1393 if (!pll)
1394 return NULL;
1395
1396 intel_reference_shared_dpll(pll, crtc_state);
1397
1398 return pll;
1399 }
1400
1401 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1402 struct intel_dpll_hw_state *hw_state)
1403 {
1404 DRM_DEBUG_KMS("dpll_hw_state: "
1405 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1406 hw_state->ctrl1,
1407 hw_state->cfgcr1,
1408 hw_state->cfgcr2);
1409 }
1410
1411 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1412 .enable = skl_ddi_pll_enable,
1413 .disable = skl_ddi_pll_disable,
1414 .get_hw_state = skl_ddi_pll_get_hw_state,
1415 };
1416
1417 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1418 .enable = skl_ddi_dpll0_enable,
1419 .disable = skl_ddi_dpll0_disable,
1420 .get_hw_state = skl_ddi_dpll0_get_hw_state,
1421 };
1422
1423 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1424 struct intel_shared_dpll *pll)
1425 {
1426 uint32_t temp;
1427 enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
1428 enum dpio_phy phy;
1429 enum dpio_channel ch;
1430
1431 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1432
1433 /* Non-SSC reference */
1434 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1435 temp |= PORT_PLL_REF_SEL;
1436 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1437
1438 if (IS_GEMINILAKE(dev_priv)) {
1439 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1440 temp |= PORT_PLL_POWER_ENABLE;
1441 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1442
1443 if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1444 PORT_PLL_POWER_STATE), 200))
1445 DRM_ERROR("Power state not set for PLL:%d\n", port);
1446 }
1447
1448 /* Disable 10 bit clock */
1449 temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1450 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1451 I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1452
1453 /* Write P1 & P2 */
1454 temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1455 temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1456 temp |= pll->state.hw_state.ebb0;
1457 I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
1458
1459 /* Write M2 integer */
1460 temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1461 temp &= ~PORT_PLL_M2_MASK;
1462 temp |= pll->state.hw_state.pll0;
1463 I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
1464
1465 /* Write N */
1466 temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1467 temp &= ~PORT_PLL_N_MASK;
1468 temp |= pll->state.hw_state.pll1;
1469 I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
1470
1471 /* Write M2 fraction */
1472 temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1473 temp &= ~PORT_PLL_M2_FRAC_MASK;
1474 temp |= pll->state.hw_state.pll2;
1475 I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
1476
1477 /* Write M2 fraction enable */
1478 temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1479 temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1480 temp |= pll->state.hw_state.pll3;
1481 I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
1482
1483 /* Write coeff */
1484 temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1485 temp &= ~PORT_PLL_PROP_COEFF_MASK;
1486 temp &= ~PORT_PLL_INT_COEFF_MASK;
1487 temp &= ~PORT_PLL_GAIN_CTL_MASK;
1488 temp |= pll->state.hw_state.pll6;
1489 I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
1490
1491 /* Write calibration val */
1492 temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1493 temp &= ~PORT_PLL_TARGET_CNT_MASK;
1494 temp |= pll->state.hw_state.pll8;
1495 I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
1496
1497 temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1498 temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1499 temp |= pll->state.hw_state.pll9;
1500 I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
1501
1502 temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1503 temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1504 temp &= ~PORT_PLL_DCO_AMP_MASK;
1505 temp |= pll->state.hw_state.pll10;
1506 I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
1507
1508 /* Recalibrate with new settings */
1509 temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1510 temp |= PORT_PLL_RECALIBRATE;
1511 I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1512 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1513 temp |= pll->state.hw_state.ebb4;
1514 I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1515
1516 /* Enable PLL */
1517 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1518 temp |= PORT_PLL_ENABLE;
1519 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1520 POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1521
1522 if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1523 200))
1524 DRM_ERROR("PLL %d not locked\n", port);
1525
1526 if (IS_GEMINILAKE(dev_priv)) {
1527 temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
1528 temp |= DCC_DELAY_RANGE_2;
1529 I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
1530 }
1531
1532 /*
1533 * While we write to the group register to program all lanes at once we
1534 * can read only lane registers and we pick lanes 0/1 for that.
1535 */
1536 temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1537 temp &= ~LANE_STAGGER_MASK;
1538 temp &= ~LANESTAGGER_STRAP_OVRD;
1539 temp |= pll->state.hw_state.pcsdw12;
1540 I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
1541 }
1542
1543 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
1544 struct intel_shared_dpll *pll)
1545 {
1546 enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
1547 uint32_t temp;
1548
1549 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1550 temp &= ~PORT_PLL_ENABLE;
1551 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1552 POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1553
1554 if (IS_GEMINILAKE(dev_priv)) {
1555 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1556 temp &= ~PORT_PLL_POWER_ENABLE;
1557 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1558
1559 if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1560 PORT_PLL_POWER_STATE), 200))
1561 DRM_ERROR("Power state not reset for PLL:%d\n", port);
1562 }
1563 }
1564
1565 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1566 struct intel_shared_dpll *pll,
1567 struct intel_dpll_hw_state *hw_state)
1568 {
1569 enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
1570 uint32_t val;
1571 bool ret;
1572 enum dpio_phy phy;
1573 enum dpio_channel ch;
1574
1575 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1576
1577 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
1578 return false;
1579
1580 ret = false;
1581
1582 val = I915_READ(BXT_PORT_PLL_ENABLE(port));
1583 if (!(val & PORT_PLL_ENABLE))
1584 goto out;
1585
1586 hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1587 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
1588
1589 hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1590 hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
1591
1592 hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1593 hw_state->pll0 &= PORT_PLL_M2_MASK;
1594
1595 hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1596 hw_state->pll1 &= PORT_PLL_N_MASK;
1597
1598 hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1599 hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
1600
1601 hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1602 hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
1603
1604 hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1605 hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
1606 PORT_PLL_INT_COEFF_MASK |
1607 PORT_PLL_GAIN_CTL_MASK;
1608
1609 hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1610 hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
1611
1612 hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1613 hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
1614
1615 hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1616 hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
1617 PORT_PLL_DCO_AMP_MASK;
1618
1619 /*
1620 * While we write to the group register to program all lanes at once we
1621 * can read only lane registers. We configure all lanes the same way, so
1622 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
1623 */
1624 hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1625 if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
1626 DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
1627 hw_state->pcsdw12,
1628 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
1629 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
1630
1631 ret = true;
1632
1633 out:
1634 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1635
1636 return ret;
1637 }
1638
1639 /* bxt clock parameters */
1640 struct bxt_clk_div {
1641 int clock;
1642 uint32_t p1;
1643 uint32_t p2;
1644 uint32_t m2_int;
1645 uint32_t m2_frac;
1646 bool m2_frac_en;
1647 uint32_t n;
1648
1649 int vco;
1650 };
1651
1652 /* pre-calculated values for DP linkrates */
1653 static const struct bxt_clk_div bxt_dp_clk_val[] = {
1654 {162000, 4, 2, 32, 1677722, 1, 1},
1655 {270000, 4, 1, 27, 0, 0, 1},
1656 {540000, 2, 1, 27, 0, 0, 1},
1657 {216000, 3, 2, 32, 1677722, 1, 1},
1658 {243000, 4, 1, 24, 1258291, 1, 1},
1659 {324000, 4, 1, 32, 1677722, 1, 1},
1660 {432000, 3, 1, 32, 1677722, 1, 1}
1661 };
1662
1663 static bool
1664 bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
1665 struct intel_crtc_state *crtc_state, int clock,
1666 struct bxt_clk_div *clk_div)
1667 {
1668 struct dpll best_clock;
1669
1670 /* Calculate HDMI div */
1671 /*
1672 * FIXME: tie the following calculation into
1673 * i9xx_crtc_compute_clock
1674 */
1675 if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
1676 DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
1677 clock, pipe_name(intel_crtc->pipe));
1678 return false;
1679 }
1680
1681 clk_div->p1 = best_clock.p1;
1682 clk_div->p2 = best_clock.p2;
1683 WARN_ON(best_clock.m1 != 2);
1684 clk_div->n = best_clock.n;
1685 clk_div->m2_int = best_clock.m2 >> 22;
1686 clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
1687 clk_div->m2_frac_en = clk_div->m2_frac != 0;
1688
1689 clk_div->vco = best_clock.vco;
1690
1691 return true;
1692 }
1693
1694 static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
1695 {
1696 int i;
1697
1698 *clk_div = bxt_dp_clk_val[0];
1699 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
1700 if (bxt_dp_clk_val[i].clock == clock) {
1701 *clk_div = bxt_dp_clk_val[i];
1702 break;
1703 }
1704 }
1705
1706 clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
1707 }
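/*
 * Spot-check of the VCO formula above against bxt_dp_clk_val (clock is in
 * kHz, so vco is too): the 270000 entry gives
 * 270000 * 10 / 2 * 4 * 1 = 5400000, exactly the vco == 5400000 case in
 * bxt_ddi_set_dpll_hw_state(), and the 162000 entry gives
 * 162000 * 10 / 2 * 4 * 2 = 6480000, inside the 6200000..6700000 band.
 */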
1708
1709 static bool bxt_ddi_set_dpll_hw_state(int clock,
1710 struct bxt_clk_div *clk_div,
1711 struct intel_dpll_hw_state *dpll_hw_state)
1712 {
1713 int vco = clk_div->vco;
1714 uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
1715 uint32_t lanestagger;
1716
1717 if (vco >= 6200000 && vco <= 6700000) {
1718 prop_coef = 4;
1719 int_coef = 9;
1720 gain_ctl = 3;
1721 targ_cnt = 8;
1722 } else if ((vco > 5400000 && vco < 6200000) ||
1723 (vco >= 4800000 && vco < 5400000)) {
1724 prop_coef = 5;
1725 int_coef = 11;
1726 gain_ctl = 3;
1727 targ_cnt = 9;
1728 } else if (vco == 5400000) {
1729 prop_coef = 3;
1730 int_coef = 8;
1731 gain_ctl = 1;
1732 targ_cnt = 9;
1733 } else {
1734 DRM_ERROR("Invalid VCO\n");
1735 return false;
1736 }
1737
1738 if (clock > 270000)
1739 lanestagger = 0x18;
1740 else if (clock > 135000)
1741 lanestagger = 0x0d;
1742 else if (clock > 67000)
1743 lanestagger = 0x07;
1744 else if (clock > 33000)
1745 lanestagger = 0x04;
1746 else
1747 lanestagger = 0x02;
1748
1749 dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
1750 dpll_hw_state->pll0 = clk_div->m2_int;
1751 dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
1752 dpll_hw_state->pll2 = clk_div->m2_frac;
1753
1754 if (clk_div->m2_frac_en)
1755 dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
1756
1757 dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
1758 dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
1759
1760 dpll_hw_state->pll8 = targ_cnt;
1761
1762 dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
1763
1764 dpll_hw_state->pll10 =
1765 PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
1766 | PORT_PLL_DCO_AMP_OVR_EN_H;
1767
1768 dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
1769
1770 dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
1771
1772 return true;
1773 }
1774
1775 static bool
1776 bxt_ddi_dp_set_dpll_hw_state(int clock,
1777 struct intel_dpll_hw_state *dpll_hw_state)
1778 {
1779 struct bxt_clk_div clk_div = {0};
1780
1781 bxt_ddi_dp_pll_dividers(clock, &clk_div);
1782
1783 return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
1784 }
1785
1786 static bool
1787 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc,
1788 struct intel_crtc_state *crtc_state, int clock,
1789 struct intel_dpll_hw_state *dpll_hw_state)
1790 {
1791 struct bxt_clk_div clk_div = { };
1792
1793 bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div);
1794
1795 return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
1796 }
1797
1798 static struct intel_shared_dpll *
1799 bxt_get_dpll(struct intel_crtc *crtc,
1800 struct intel_crtc_state *crtc_state,
1801 struct intel_encoder *encoder)
1802 {
1803 struct intel_dpll_hw_state dpll_hw_state = { };
1804 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1805 struct intel_shared_dpll *pll;
1806 int i, clock = crtc_state->port_clock;
1807
1808 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
1809 !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
1810 &dpll_hw_state))
1811 return NULL;
1812
1813 if (intel_crtc_has_dp_encoder(crtc_state) &&
1814 !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
1815 return NULL;
1816
1817 memset(&crtc_state->dpll_hw_state, 0,
1818 sizeof(crtc_state->dpll_hw_state));
1819
1820 crtc_state->dpll_hw_state = dpll_hw_state;
1821
1822 /* 1:1 mapping between ports and PLLs */
1823 i = (enum intel_dpll_id) encoder->port;
1824 pll = intel_get_shared_dpll_by_id(dev_priv, i);
1825
1826 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
1827 crtc->base.base.id, crtc->base.name, pll->name);
1828
1829 intel_reference_shared_dpll(pll, crtc_state);
1830
1831 return pll;
1832 }
1833
1834 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
1835 struct intel_dpll_hw_state *hw_state)
1836 {
1837         DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
1838 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
1839 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
1840 hw_state->ebb0,
1841 hw_state->ebb4,
1842 hw_state->pll0,
1843 hw_state->pll1,
1844 hw_state->pll2,
1845 hw_state->pll3,
1846 hw_state->pll6,
1847 hw_state->pll8,
1848 hw_state->pll9,
1849 hw_state->pll10,
1850 hw_state->pcsdw12);
1851 }
1852
1853 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1854 .enable = bxt_ddi_pll_enable,
1855 .disable = bxt_ddi_pll_disable,
1856 .get_hw_state = bxt_ddi_pll_get_hw_state,
1857 };
1858
1859 static void intel_ddi_pll_init(struct drm_device *dev)
1860 {
1861 struct drm_i915_private *dev_priv = to_i915(dev);
1862
1863 if (INTEL_GEN(dev_priv) < 9) {
1864 uint32_t val = I915_READ(LCPLL_CTL);
1865
1866 /*
1867 * The LCPLL register should be turned on by the BIOS. For now
1868 * let's just check its state and print errors in case
1869 * something is wrong. Don't even try to turn it on.
1870 */
1871
1872 if (val & LCPLL_CD_SOURCE_FCLK)
1873 DRM_ERROR("CDCLK source is not LCPLL\n");
1874
1875 if (val & LCPLL_PLL_DISABLE)
1876 DRM_ERROR("LCPLL is disabled\n");
1877 }
1878 }
1879
1880 struct dpll_info {
1881 const char *name;
1882 const int id;
1883 const struct intel_shared_dpll_funcs *funcs;
1884 uint32_t flags;
1885 };
1886
1887 struct intel_dpll_mgr {
1888 const struct dpll_info *dpll_info;
1889
1890 struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
1891 struct intel_crtc_state *crtc_state,
1892 struct intel_encoder *encoder);
1893
1894 void (*dump_hw_state)(struct drm_i915_private *dev_priv,
1895 struct intel_dpll_hw_state *hw_state);
1896 };
1897
1898 static const struct dpll_info pch_plls[] = {
1899 { "PCH DPLL A", DPLL_ID_PCH_PLL_A, &ibx_pch_dpll_funcs, 0 },
1900 { "PCH DPLL B", DPLL_ID_PCH_PLL_B, &ibx_pch_dpll_funcs, 0 },
1901 { NULL, -1, NULL, 0 },
1902 };
1903
1904 static const struct intel_dpll_mgr pch_pll_mgr = {
1905 .dpll_info = pch_plls,
1906 .get_dpll = ibx_get_dpll,
1907 .dump_hw_state = ibx_dump_hw_state,
1908 };
1909
1910 static const struct dpll_info hsw_plls[] = {
1911 { "WRPLL 1", DPLL_ID_WRPLL1, &hsw_ddi_wrpll_funcs, 0 },
1912 { "WRPLL 2", DPLL_ID_WRPLL2, &hsw_ddi_wrpll_funcs, 0 },
1913 { "SPLL", DPLL_ID_SPLL, &hsw_ddi_spll_funcs, 0 },
1914 { "LCPLL 810", DPLL_ID_LCPLL_810, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
1915 { "LCPLL 1350", DPLL_ID_LCPLL_1350, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
1916 { "LCPLL 2700", DPLL_ID_LCPLL_2700, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
1917 { NULL, -1, NULL, },
1918 };
1919
1920 static const struct intel_dpll_mgr hsw_pll_mgr = {
1921 .dpll_info = hsw_plls,
1922 .get_dpll = hsw_get_dpll,
1923 .dump_hw_state = hsw_dump_hw_state,
1924 };
1925
1926 static const struct dpll_info skl_plls[] = {
1927 { "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON },
1928 { "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 },
1929 { "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 },
1930 { "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 },
1931 { NULL, -1, NULL, },
1932 };
1933
1934 static const struct intel_dpll_mgr skl_pll_mgr = {
1935 .dpll_info = skl_plls,
1936 .get_dpll = skl_get_dpll,
1937 .dump_hw_state = skl_dump_hw_state,
1938 };
1939
1940 static const struct dpll_info bxt_plls[] = {
1941 { "PORT PLL A", DPLL_ID_SKL_DPLL0, &bxt_ddi_pll_funcs, 0 },
1942 { "PORT PLL B", DPLL_ID_SKL_DPLL1, &bxt_ddi_pll_funcs, 0 },
1943 { "PORT PLL C", DPLL_ID_SKL_DPLL2, &bxt_ddi_pll_funcs, 0 },
1944 { NULL, -1, NULL, },
1945 };
1946
1947 static const struct intel_dpll_mgr bxt_pll_mgr = {
1948 .dpll_info = bxt_plls,
1949 .get_dpll = bxt_get_dpll,
1950 .dump_hw_state = bxt_dump_hw_state,
1951 };
1952
1953 static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1954 struct intel_shared_dpll *pll)
1955 {
1956 uint32_t val;
1957
1958 /* 1. Enable DPLL power in DPLL_ENABLE. */
1959 val = I915_READ(CNL_DPLL_ENABLE(pll->id));
1960 val |= PLL_POWER_ENABLE;
1961 I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
1962
1963 /* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
1964 if (intel_wait_for_register(dev_priv,
1965 CNL_DPLL_ENABLE(pll->id),
1966 PLL_POWER_STATE,
1967 PLL_POWER_STATE,
1968 5))
1969 DRM_ERROR("PLL %d Power not enabled\n", pll->id);
1970
1971 /*
1972 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
1973 * select DP mode, and set DP link rate.
1974 */
1975 val = pll->state.hw_state.cfgcr0;
1976 I915_WRITE(CNL_DPLL_CFGCR0(pll->id), val);
1977
1978 /* 4. Read back to ensure writes completed */
1979 POSTING_READ(CNL_DPLL_CFGCR0(pll->id));
1980
1981 /* 3. Configure DPLL_CFGCR1 with the HDMI dividers. */
1982 /* Avoid touching CFGCR1 if HDMI mode is not enabled */
1983 if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
1984 val = pll->state.hw_state.cfgcr1;
1985 I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
1986 /* 4. Read back to ensure writes completed */
1987 POSTING_READ(CNL_DPLL_CFGCR1(pll->id));
1988 }
1989
1990 /*
1991 * 5. If the frequency will result in a change to the voltage
1992 * requirement, follow the Display Voltage Frequency Switching
1993 * Sequence Before Frequency Change
1994 *
1995 * Note: DVFS is actually handled via the cdclk code paths,
1996 * hence we do nothing here.
1997 */
1998
1999 /* 6. Enable DPLL in DPLL_ENABLE. */
2000 val = I915_READ(CNL_DPLL_ENABLE(pll->id));
2001 val |= PLL_ENABLE;
2002 I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
2003
2004 /* 7. Wait for PLL lock status in DPLL_ENABLE. */
2005 if (intel_wait_for_register(dev_priv,
2006 CNL_DPLL_ENABLE(pll->id),
2007 PLL_LOCK,
2008 PLL_LOCK,
2009 5))
2010 DRM_ERROR("PLL %d not locked\n", pll->id);
2011
2012 /*
2013 * 8. If the frequency will result in a change to the voltage
2014 * requirement, follow the Display Voltage Frequency Switching
2015 * Sequence After Frequency Change
2016 *
2017 * Note: DVFS is actually handled via the cdclk code paths,
2018 * hence we do nothing here.
2019 */
2020
2021 /*
2022 * 9. Turn on the clock for the DDI and map the DPLL to the DDI.
2023 * Done in intel_ddi_clk_select().
2024 */
2025 }
2026
2027 static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
2028 struct intel_shared_dpll *pll)
2029 {
2030 uint32_t val;
2031
2032 /*
2033 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
2034 * Done in intel_ddi_post_disable().
2035 */
2036
2037 /*
2038 * 2. If the frequency will result in a change to the voltage
2039 * requirement, follow the Display Voltage Frequency Switching
2040 * Sequence Before Frequency Change
2041 *
2042 * Note: DVFS is actually handled via the cdclk code paths,
2043 * hence we do nothing here.
2044 */
2045
2046 /* 3. Disable DPLL through DPLL_ENABLE. */
2047 val = I915_READ(CNL_DPLL_ENABLE(pll->id));
2048 val &= ~PLL_ENABLE;
2049 I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
2050
2051 /* 4. Wait for PLL not locked status in DPLL_ENABLE. */
2052 if (intel_wait_for_register(dev_priv,
2053 CNL_DPLL_ENABLE(pll->id),
2054 PLL_LOCK,
2055 0,
2056 5))
2057 DRM_ERROR("PLL %d still locked\n", pll->id);
2058
2059 /*
2060 * 5. If the frequency will result in a change to the voltage
2061 * requirement, follow the Display Voltage Frequency Switching
2062 * Sequence After Frequency Change
2063 *
2064 * Note: DVFS is actually handled via the cdclk code paths,
2065 * hence we do nothing here.
2066 */
2067
2068 /* 6. Disable DPLL power in DPLL_ENABLE. */
2069 val = I915_READ(CNL_DPLL_ENABLE(pll->id));
2070 val &= ~PLL_POWER_ENABLE;
2071 I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);
2072
2073 /* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
2074 if (intel_wait_for_register(dev_priv,
2075 CNL_DPLL_ENABLE(pll->id),
2076 PLL_POWER_STATE,
2077 0,
2078 5))
2079 DRM_ERROR("PLL %d Power not disabled\n", pll->id);
2080 }
2081
2082 static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2083 struct intel_shared_dpll *pll,
2084 struct intel_dpll_hw_state *hw_state)
2085 {
2086 uint32_t val;
2087 bool ret;
2088
2089 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2090 return false;
2091
2092 ret = false;
2093
2094 val = I915_READ(CNL_DPLL_ENABLE(pll->id));
2095 if (!(val & PLL_ENABLE))
2096 goto out;
2097
2098 val = I915_READ(CNL_DPLL_CFGCR0(pll->id));
2099 hw_state->cfgcr0 = val;
2100
2101 /* avoid reading back stale values if HDMI mode is not enabled */
2102 if (val & DPLL_CFGCR0_HDMI_MODE) {
2103 hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll->id));
2104 }
2105 ret = true;
2106
2107 out:
2108 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2109
2110 return ret;
2111 }
2112
2113 static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2114 int *qdiv, int *kdiv)
2115 {
2116 /* even dividers */
2117 if (bestdiv % 2 == 0) {
2118 if (bestdiv == 2) {
2119 *pdiv = 2;
2120 *qdiv = 1;
2121 *kdiv = 1;
2122 } else if (bestdiv % 4 == 0) {
2123 *pdiv = 2;
2124 *qdiv = bestdiv / 4;
2125 *kdiv = 2;
2126 } else if (bestdiv % 6 == 0) {
2127 *pdiv = 3;
2128 *qdiv = bestdiv / 6;
2129 *kdiv = 2;
2130 } else if (bestdiv % 5 == 0) {
2131 *pdiv = 5;
2132 *qdiv = bestdiv / 10;
2133 *kdiv = 2;
2134 } else if (bestdiv % 14 == 0) {
2135 *pdiv = 7;
2136 *qdiv = bestdiv / 14;
2137 *kdiv = 2;
2138 }
2139 } else {
2140 if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2141 *pdiv = bestdiv;
2142 *qdiv = 1;
2143 *kdiv = 1;
2144 } else { /* 9, 15, 21 */
2145 *pdiv = bestdiv / 3;
2146 *qdiv = 1;
2147 *kdiv = 3;
2148 }
2149 }
2150 }
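/*
 * Worked example of the decomposition above (illustrative only): for the
 * dividers used by this driver the three factors satisfy
 * pdiv * qdiv * kdiv == bestdiv. For bestdiv = 28 the even path takes the
 * "% 4" branch, giving pdiv = 2, qdiv = 28 / 4 = 7, kdiv = 2
 * (2 * 7 * 2 = 28). For an odd bestdiv such as 15, the fallback branch
 * gives pdiv = 15 / 3 = 5, qdiv = 1, kdiv = 3 (5 * 1 * 3 = 15).
 */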
2151
2152 static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2153 u32 dco_freq, u32 ref_freq,
2154 int pdiv, int qdiv, int kdiv)
2155 {
2156 u32 dco;
2157
2158 switch (kdiv) {
2159 case 1:
2160 params->kdiv = 1;
2161 break;
2162 case 2:
2163 params->kdiv = 2;
2164 break;
2165 case 3:
2166 params->kdiv = 4;
2167 break;
2168 default:
2169 WARN(1, "Incorrect KDiv\n");
2170 }
2171
2172 switch (pdiv) {
2173 case 2:
2174 params->pdiv = 1;
2175 break;
2176 case 3:
2177 params->pdiv = 2;
2178 break;
2179 case 5:
2180 params->pdiv = 4;
2181 break;
2182 case 7:
2183 params->pdiv = 8;
2184 break;
2185 default:
2186 WARN(1, "Incorrect PDiv\n");
2187 }
2188
2189 WARN_ON(kdiv != 2 && qdiv != 1);
2190
2191 params->qdiv_ratio = qdiv;
2192 params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2193
2194 dco = div_u64((u64)dco_freq << 15, ref_freq);
2195
2196 params->dco_integer = dco >> 15;
2197 params->dco_fraction = dco & 0x7fff;
2198 }
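/*
 * Numerical example for the fixed-point split above (assuming kHz units,
 * as used by the DCO limits in cnl_ddi_calculate_wrpll): with
 * dco_freq = 8100000 and ref_freq = 24000,
 * dco = 8100000 * 2^15 / 24000 = 11059200, so
 * dco_integer = 11059200 >> 15 = 337 and
 * dco_fraction = 11059200 & 0x7fff = 16384,
 * i.e. a DCO ratio of 337.5 in 15-bit fixed point.
 */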
2199
2200 static bool
2201 cnl_ddi_calculate_wrpll(int clock,
2202 struct drm_i915_private *dev_priv,
2203 struct skl_wrpll_params *wrpll_params)
2204 {
2205 u32 afe_clock = clock * 5;
2206 u32 dco_min = 7998000;
2207 u32 dco_max = 10000000;
2208 u32 dco_mid = (dco_min + dco_max) / 2;
2209 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2210 18, 20, 24, 28, 30, 32, 36, 40,
2211 42, 44, 48, 50, 52, 54, 56, 60,
2212 64, 66, 68, 70, 72, 76, 78, 80,
2213 84, 88, 90, 92, 96, 98, 100, 102,
2214 3, 5, 7, 9, 15, 21 };
2215 u32 dco, best_dco = 0, dco_centrality = 0;
2216 u32 best_dco_centrality = U32_MAX; /* spec's initial "999999 MHz" value */
2217 int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2218
2219 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2220 dco = afe_clock * dividers[d];
2221
2222 if ((dco <= dco_max) && (dco >= dco_min)) {
2223 dco_centrality = abs(dco - dco_mid);
2224
2225 if (dco_centrality < best_dco_centrality) {
2226 best_dco_centrality = dco_centrality;
2227 best_div = dividers[d];
2228 best_dco = dco;
2229 }
2230 }
2231 }
2232
2233 if (best_div == 0)
2234 return false;
2235
2236 cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2237
2238 cnl_wrpll_params_populate(wrpll_params, best_dco,
2239 dev_priv->cdclk.hw.ref, pdiv, qdiv, kdiv);
2240
2241 return true;
2242 }
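/*
 * Example of the search above (values in kHz, illustrative only): for a
 * 148.5 MHz HDMI clock, afe_clock = 5 * 148500 = 742500. Only divider 12
 * from the table lands the DCO inside [7998000, 10000000]
 * (742500 * 12 = 8910000), so best_div = 12 and best_dco = 8910000, which
 * cnl_wrpll_get_multipliers() then splits into pdiv = 2, qdiv = 3, kdiv = 2.
 */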
2243
2244 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
2245 struct intel_crtc_state *crtc_state,
2246 int clock)
2247 {
2248 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2249 uint32_t cfgcr0, cfgcr1;
2250 struct skl_wrpll_params wrpll_params = { 0, };
2251
2252 cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2253
2254 if (!cnl_ddi_calculate_wrpll(clock, dev_priv, &wrpll_params))
2255 return false;
2256
2257 cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2258 wrpll_params.dco_integer;
2259
2260 cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2261 DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2262 DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2263 DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2264 DPLL_CFGCR1_CENTRAL_FREQ;
2265
2266 memset(&crtc_state->dpll_hw_state, 0,
2267 sizeof(crtc_state->dpll_hw_state));
2268
2269 crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2270 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2271 return true;
2272 }
2273
2274 static bool
2275 cnl_ddi_dp_set_dpll_hw_state(int clock,
2276 struct intel_dpll_hw_state *dpll_hw_state)
2277 {
2278 uint32_t cfgcr0;
2279
2280 cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2281
2282 switch (clock / 2) {
2283 case 81000:
2284 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2285 break;
2286 case 135000:
2287 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2288 break;
2289 case 270000:
2290 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2291 break;
2292 /* eDP 1.4 rates */
2293 case 162000:
2294 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2295 break;
2296 case 108000:
2297 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2298 break;
2299 case 216000:
2300 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2301 break;
2302 case 324000:
2303 /* Some SKUs may require elevated I/O voltage to support this */
2304 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2305 break;
2306 case 405000:
2307 /* Some SKUs may require elevated I/O voltage to support this */
2308 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2309 break;
2310 }
2311
2312 dpll_hw_state->cfgcr0 = cfgcr0;
2313 return true;
2314 }
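/*
 * The switch above keys on half the DP port clock (port_clock is in kHz):
 * e.g. RBR (port_clock 162000) selects LINK_RATE_810 and HBR2 (port_clock
 * 540000) selects LINK_RATE_2700. Rates without a matching case fall
 * through and leave only the SSC enable bit set in cfgcr0.
 */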
2315
2316 static struct intel_shared_dpll *
2317 cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
2318 struct intel_encoder *encoder)
2319 {
2320 struct intel_shared_dpll *pll;
2321 int clock = crtc_state->port_clock;
2322 bool bret;
2323 struct intel_dpll_hw_state dpll_hw_state;
2324
2325 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
2326
2327 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2328 bret = cnl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
2329 if (!bret) {
2330 DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
2331 return NULL;
2332 }
2333 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
2334 bret = cnl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
2335 if (!bret) {
2336 DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
2337 return NULL;
2338 }
2339 crtc_state->dpll_hw_state = dpll_hw_state;
2340 } else {
2341 DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
2342 crtc_state->output_types);
2343 return NULL;
2344 }
2345
2346 pll = intel_find_shared_dpll(crtc, crtc_state,
2347 DPLL_ID_SKL_DPLL0,
2348 DPLL_ID_SKL_DPLL2);
2349 if (!pll) {
2350 DRM_DEBUG_KMS("No PLL selected\n");
2351 return NULL;
2352 }
2353
2354 intel_reference_shared_dpll(pll, crtc_state);
2355
2356 return pll;
2357 }
2358
2359 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
2360 struct intel_dpll_hw_state *hw_state)
2361 {
2362 DRM_DEBUG_KMS("dpll_hw_state: "
2363 "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
2364 hw_state->cfgcr0,
2365 hw_state->cfgcr1);
2366 }
2367
2368 static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
2369 .enable = cnl_ddi_pll_enable,
2370 .disable = cnl_ddi_pll_disable,
2371 .get_hw_state = cnl_ddi_pll_get_hw_state,
2372 };
2373
2374 static const struct dpll_info cnl_plls[] = {
2375 { "DPLL 0", DPLL_ID_SKL_DPLL0, &cnl_ddi_pll_funcs, 0 },
2376 { "DPLL 1", DPLL_ID_SKL_DPLL1, &cnl_ddi_pll_funcs, 0 },
2377 { "DPLL 2", DPLL_ID_SKL_DPLL2, &cnl_ddi_pll_funcs, 0 },
2378 { NULL, -1, NULL, },
2379 };
2380
2381 static const struct intel_dpll_mgr cnl_pll_mgr = {
2382 .dpll_info = cnl_plls,
2383 .get_dpll = cnl_get_dpll,
2384 .dump_hw_state = cnl_dump_hw_state,
2385 };
2386
2387 /**
2388 * intel_shared_dpll_init - Initialize shared DPLLs
2389 * @dev: drm device
2390 *
2391 * Initialize shared DPLLs for @dev.
2392 */
2393 void intel_shared_dpll_init(struct drm_device *dev)
2394 {
2395 struct drm_i915_private *dev_priv = to_i915(dev);
2396 const struct intel_dpll_mgr *dpll_mgr = NULL;
2397 const struct dpll_info *dpll_info;
2398 int i;
2399
2400 if (IS_CANNONLAKE(dev_priv))
2401 dpll_mgr = &cnl_pll_mgr;
2402 else if (IS_GEN9_BC(dev_priv))
2403 dpll_mgr = &skl_pll_mgr;
2404 else if (IS_GEN9_LP(dev_priv))
2405 dpll_mgr = &bxt_pll_mgr;
2406 else if (HAS_DDI(dev_priv))
2407 dpll_mgr = &hsw_pll_mgr;
2408 else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
2409 dpll_mgr = &pch_pll_mgr;
2410
2411 if (!dpll_mgr) {
2412 dev_priv->num_shared_dpll = 0;
2413 return;
2414 }
2415
2416 dpll_info = dpll_mgr->dpll_info;
2417
2418 for (i = 0; dpll_info[i].id >= 0; i++) {
2419 WARN_ON(i != dpll_info[i].id);
2420
2421 dev_priv->shared_dplls[i].id = dpll_info[i].id;
2422 dev_priv->shared_dplls[i].name = dpll_info[i].name;
2423 dev_priv->shared_dplls[i].funcs = *dpll_info[i].funcs;
2424 dev_priv->shared_dplls[i].flags = dpll_info[i].flags;
2425 }
2426
2427 dev_priv->dpll_mgr = dpll_mgr;
2428 dev_priv->num_shared_dpll = i;
2429 mutex_init(&dev_priv->dpll_lock);
2430
2431 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
2432
2433 /* FIXME: Move this to a more suitable place */
2434 if (HAS_DDI(dev_priv))
2435 intel_ddi_pll_init(dev);
2436 }
2437
2438 /**
2439 * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
2440 * @crtc: CRTC
2441 * @crtc_state: atomic state for @crtc
2442 * @encoder: encoder
2443 *
2444 * Find an appropriate DPLL for the given CRTC and encoder combination. A
2445 * reference from the @crtc to the returned pll is registered in the atomic
2446 * state. That configuration is made effective by calling
2447 * intel_shared_dpll_swap_state(). The reference should be released by calling
2448 * intel_release_shared_dpll().
2449 *
2450 * Returns:
2451 * A shared DPLL to be used by @crtc and @encoder with the given @crtc_state.
2452 */
2453 struct intel_shared_dpll *
2454 intel_get_shared_dpll(struct intel_crtc *crtc,
2455 struct intel_crtc_state *crtc_state,
2456 struct intel_encoder *encoder)
2457 {
2458 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2459 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
2460
2461 if (WARN_ON(!dpll_mgr))
2462 return NULL;
2463
2464 return dpll_mgr->get_dpll(crtc, crtc_state, encoder);
2465 }
2466
2467 /**
2468 * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
2469 * @dpll: dpll in use by @crtc
2470 * @crtc: crtc
2471 * @state: atomic state
2472 *
2473 * This function releases the reference from @crtc to @dpll from the
2474 * atomic @state. The new configuration is made effective by calling
2475 * intel_shared_dpll_swap_state().
2476 */
2477 void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
2478 struct intel_crtc *crtc,
2479 struct drm_atomic_state *state)
2480 {
2481 struct intel_shared_dpll_state *shared_dpll_state;
2482
2483 shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
2484 shared_dpll_state[dpll->id].crtc_mask &= ~(1 << crtc->pipe);
2485 }
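/*
 * Sketch of the intended calling sequence (simplified; error handling and
 * the surrounding atomic check/commit plumbing are omitted):
 *
 *   pll = intel_get_shared_dpll(crtc, crtc_state, encoder);
 *   if (!pll)
 *           return -EINVAL;
 *   ...
 *   intel_shared_dpll_swap_state(state);          // at atomic commit time
 *   ...
 *   intel_release_shared_dpll(pll, crtc, state);  // when the CRTC drops the PLL
 */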
2486
2487 /**
2488 * intel_dpll_dump_hw_state - write hw_state to dmesg
2489 * @dev_priv: i915 drm device
2490 * @hw_state: hw state to be written to the log
2491 *
2492 * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
2493 */
2494 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
2495 struct intel_dpll_hw_state *hw_state)
2496 {
2497 if (dev_priv->dpll_mgr) {
2498 dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
2499 } else {
2500 /* fallback for platforms that don't use the shared dpll
2501 * infrastructure
2502 */
2503 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
2504 "fp0: 0x%x, fp1: 0x%x\n",
2505 hw_state->dpll,
2506 hw_state->dpll_md,
2507 hw_state->fp0,
2508 hw_state->fp1);
2509 }
2510 }