/*
 * Copyright © 2006-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
24 #include "intel_dpio_phy.h"
25 #include "intel_dpll_mgr.h"
26 #include "intel_drv.h"
/**
 * DOC: Display PLLs
 *
 * Display PLLs used for driving outputs vary by platform. While some have
 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
 * from a pool. In the latter scenario, it is possible that multiple pipes
 * share a PLL if their configurations match.
 *
 * This file provides an abstraction over display PLLs. The function
 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
 * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
 * for a given CRTC and encoder configuration by calling
 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
 * with intel_release_shared_dplls().
 * Changes to the users are first staged in the atomic state, and then made
 * effective by calling intel_shared_dpll_swap_state() during the atomic
 * commit phase.
 */
/*
 * NOTE(review): this chunk is a lossy extraction -- original file line
 * numbers are fused into the text and some lines (braces, declarations)
 * are missing.  Comments below describe only the code that is visible.
 */
/* Snapshot every shared PLL's current state into the caller-provided array. */
49 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
50 struct intel_shared_dpll_state *shared_dpll)
54 /* Copy shared dpll state */
55 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
56 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
58 shared_dpll[i] = pll->state;
/*
 * Return the staged dpll state for this atomic state, duplicating the
 * live state on first use.  The WARN_ON documents that the caller must
 * hold connection_mutex.
 */
62 static struct intel_shared_dpll_state *
63 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
65 struct intel_atomic_state *state = to_intel_atomic_state(s);
67 WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
/* Duplicate only once per atomic state; dpll_set marks it done. */
69 if (!state->dpll_set) {
70 state->dpll_set = true;
72 intel_atomic_duplicate_dpll_state(to_i915(s->dev),
76 return state->shared_dpll;
80 * intel_get_shared_dpll_by_id - get a DPLL given its id
81 * @dev_priv: i915 device instance
85 * A pointer to the DPLL with @id
87 struct intel_shared_dpll *
88 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
89 enum intel_dpll_id id)
/* Plain array index; @id is not range-checked here -- callers must pass a valid id. */
91 return &dev_priv->shared_dplls[id];
95 * intel_get_shared_dpll_id - get the id of a DPLL
96 * @dev_priv: i915 device instance
103 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
104 struct intel_shared_dpll *pll)
106 if (WARN_ON(pll < dev_priv->shared_dplls||
107 pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
110 return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
/*
 * Warn if the PLL's hardware enable state (read via the per-platform
 * get_hw_state hook) disagrees with the expected software state.
 */
114 void assert_shared_dpll(struct drm_i915_private *dev_priv,
115 struct intel_shared_dpll *pll,
119 struct intel_dpll_hw_state hw_state;
121 if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
124 cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
125 I915_STATE_WARN(cur_state != state,
126 "%s assertion failure (expected %s, current %s)\n",
127 pll->info->name, onoff(state), onoff(cur_state));
131 * intel_prepare_shared_dpll - call a dpll's prepare hook
132 * @crtc_state: CRTC, and its state, which has a shared dpll
134 * This calls the PLL's prepare hook if it has one and if the PLL is not
135 * already enabled. The prepare hook is platform specific.
137 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
139 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
140 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
141 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
/* A crtc_state routed here must already have a PLL assigned. */
143 if (WARN_ON(pll == NULL))
146 mutex_lock(&dev_priv->dpll_lock);
147 WARN_ON(!pll->state.crtc_mask);
/* Only program the PLL while no CRTC is actively using it yet. */
148 if (!pll->active_mask) {
149 DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
151 assert_shared_dpll_disabled(dev_priv, pll);
153 pll->info->funcs->prepare(dev_priv, pll);
155 mutex_unlock(&dev_priv->dpll_lock);
159 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
160 * @crtc_state: CRTC, and its state, which has a shared DPLL
162 * Enable the shared DPLL used by @crtc.
164 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
166 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
167 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
168 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
169 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
170 unsigned int old_mask;
172 if (WARN_ON(pll == NULL))
175 mutex_lock(&dev_priv->dpll_lock);
/* Remember who was using the PLL before we add ourselves. */
176 old_mask = pll->active_mask;
/* The CRTC must hold a reservation and must not already be active on it. */
178 if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
179 WARN_ON(pll->active_mask & crtc_mask))
182 pll->active_mask |= crtc_mask;
184 DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
185 pll->info->name, pll->active_mask, pll->on,
/*
 * NOTE(review): the branch selecting between "already on -> assert" and
 * "first user -> enable" is partially missing from this extraction.
 */
190 assert_shared_dpll_enabled(dev_priv, pll);
195 DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
196 pll->info->funcs->enable(dev_priv, pll);
200 mutex_unlock(&dev_priv->dpll_lock);
204 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
205 * @crtc_state: CRTC, and its state, which has a shared DPLL
207 * Disable the shared DPLL used by @crtc.
209 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
211 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
212 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
213 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
214 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
216 /* PCH only available on ILK+ */
217 if (INTEL_GEN(dev_priv) < 5)
223 mutex_lock(&dev_priv->dpll_lock);
/* This CRTC must currently be an active user of the PLL. */
224 if (WARN_ON(!(pll->active_mask & crtc_mask)))
227 DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
228 pll->info->name, pll->active_mask, pll->on,
231 assert_shared_dpll_enabled(dev_priv, pll);
/* Drop our bit; only actually turn the PLL off once no user remains. */
234 pll->active_mask &= ~crtc_mask;
235 if (pll->active_mask)
238 DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
239 pll->info->funcs->disable(dev_priv, pll);
243 mutex_unlock(&dev_priv->dpll_lock);
/*
 * Find a PLL in [range_min, range_max] whose staged hw_state matches
 * @pll_state (so it can be shared), falling back to an unused PLL.
 * NOTE(review): the lines that record @unused_pll and the final return
 * are missing from this extraction.
 */
246 static struct intel_shared_dpll *
247 intel_find_shared_dpll(struct intel_atomic_state *state,
248 const struct intel_crtc *crtc,
249 const struct intel_dpll_hw_state *pll_state,
250 enum intel_dpll_id range_min,
251 enum intel_dpll_id range_max)
253 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
254 struct intel_shared_dpll *pll, *unused_pll = NULL;
255 struct intel_shared_dpll_state *shared_dpll;
256 enum intel_dpll_id i;
258 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
260 for (i = range_min; i <= range_max; i++) {
261 pll = &dev_priv->shared_dplls[i];
263 /* Only want to check enabled timings first */
264 if (shared_dpll[i].crtc_mask == 0) {
/* Exact hw_state match means the PLL configuration can be shared. */
270 if (memcmp(pll_state,
271 &shared_dpll[i].hw_state,
272 sizeof(*pll_state)) == 0) {
273 DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
274 crtc->base.base.id, crtc->base.name,
276 shared_dpll[i].crtc_mask,
282 /* Ok no matching timings, maybe there's a free one? */
284 DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
285 crtc->base.base.id, crtc->base.name,
286 unused_pll->info->name);
/*
 * Stage a reference from @crtc to @pll in the atomic state: record the
 * hw_state on first use and set the CRTC's bit in crtc_mask.
 */
294 intel_reference_shared_dpll(struct intel_atomic_state *state,
295 const struct intel_crtc *crtc,
296 const struct intel_shared_dpll *pll,
297 const struct intel_dpll_hw_state *pll_state)
299 struct intel_shared_dpll_state *shared_dpll;
300 const enum intel_dpll_id id = pll->info->id;
302 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
/* First user of the PLL in this state stores the hw configuration. */
304 if (shared_dpll[id].crtc_mask == 0)
305 shared_dpll[id].hw_state = *pll_state;
307 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
308 pipe_name(crtc->pipe));
310 shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
/* Inverse of the above: drop @crtc's staged reference to @pll. */
313 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
314 const struct intel_crtc *crtc,
315 const struct intel_shared_dpll *pll)
317 struct intel_shared_dpll_state *shared_dpll;
319 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
320 shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
/*
 * Release the PLL the CRTC used in its old state: clear the new state's
 * pointer and unreference the old PLL, if any.
 */
323 static void intel_put_dpll(struct intel_atomic_state *state,
324 struct intel_crtc *crtc)
326 const struct intel_crtc_state *old_crtc_state =
327 intel_atomic_get_old_crtc_state(state, crtc);
328 struct intel_crtc_state *new_crtc_state =
329 intel_atomic_get_new_crtc_state(state, crtc);
331 new_crtc_state->shared_dpll = NULL;
/* Nothing to release if the old state had no PLL assigned. */
333 if (!old_crtc_state->shared_dpll)
336 intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
340 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
341 * @state: atomic state
343 * This is the dpll version of drm_atomic_helper_swap_state() since the
344 * helper does not handle driver-specific global state.
346 * For consistency with atomic helpers this function does a complete swap,
347 * i.e. it also puts the current state into @state, even though there is no
348 * need for that at this moment.
350 void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
352 struct drm_i915_private *dev_priv = to_i915(state->dev);
353 struct intel_shared_dpll_state *shared_dpll;
354 struct intel_shared_dpll *pll;
355 enum intel_dpll_id i;
/* If no dpll state was staged in this atomic state there is nothing to swap. */
357 if (!to_intel_atomic_state(state)->dpll_set)
360 shared_dpll = to_intel_atomic_state(state)->shared_dpll;
361 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
362 struct intel_shared_dpll_state tmp;
364 pll = &dev_priv->shared_dplls[i];
/* Full swap via tmp: staged state becomes live, live state is kept in @state. */
367 pll->state = shared_dpll[i];
368 shared_dpll[i] = tmp;
/*
 * Read back the PCH DPLL registers into @hw_state.  Returns whether the
 * PLL is enabled (DPLL_VCO_ENABLE set); bails out if the display power
 * domain is not powered.
 */
372 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
373 struct intel_shared_dpll *pll,
374 struct intel_dpll_hw_state *hw_state)
376 const enum intel_dpll_id id = pll->info->id;
377 intel_wakeref_t wakeref;
380 wakeref = intel_display_power_get_if_enabled(dev_priv,
381 POWER_DOMAIN_DISPLAY_CORE);
385 val = I915_READ(PCH_DPLL(id));
386 hw_state->dpll = val;
387 hw_state->fp0 = I915_READ(PCH_FP0(id));
388 hw_state->fp1 = I915_READ(PCH_FP1(id));
390 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
392 return val & DPLL_VCO_ENABLE;
/* Program the FP0/FP1 divisor registers ahead of enabling the PLL. */
395 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
396 struct intel_shared_dpll *pll)
398 const enum intel_dpll_id id = pll->info->id;
400 I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
401 I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
/* Warn unless the PCH reference clock (any DREF source) is running; IBX/CPT only. */
404 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
409 I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
411 val = I915_READ(PCH_DREF_CONTROL);
412 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
413 DREF_SUPERSPREAD_SOURCE_MASK));
414 I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
/* Enable the PCH DPLL; the refclk must already be up. */
417 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
418 struct intel_shared_dpll *pll)
420 const enum intel_dpll_id id = pll->info->id;
422 /* PCH refclock must be enabled first */
423 ibx_assert_pch_refclk_enabled(dev_priv);
425 I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
427 /* Wait for the clocks to stabilize. */
428 POSTING_READ(PCH_DPLL(id));
431 /* The pixel multiplier can only be updated once the
432 * DPLL is enabled and the clocks are stable.
/* Second write latches the pixel multiplier after the clocks settle. */
436 I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
437 POSTING_READ(PCH_DPLL(id));
/* Disable the PCH DPLL by clearing its control register. */
441 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
442 struct intel_shared_dpll *pll)
444 const enum intel_dpll_id id = pll->info->id;
446 I915_WRITE(PCH_DPLL(id), 0);
447 POSTING_READ(PCH_DPLL(id));
/*
 * Pick a PCH DPLL for @crtc: IBX has a fixed pipe->PLL mapping, other
 * PCH variants search the pool for a shareable/free PLL, then the chosen
 * PLL is referenced and stored in the crtc_state.
 */
451 static bool ibx_get_dpll(struct intel_atomic_state *state,
452 struct intel_crtc *crtc,
453 struct intel_encoder *encoder)
455 struct intel_crtc_state *crtc_state =
456 intel_atomic_get_new_crtc_state(state, crtc);
457 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
458 struct intel_shared_dpll *pll;
459 enum intel_dpll_id i;
461 if (HAS_PCH_IBX(dev_priv)) {
462 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
463 i = (enum intel_dpll_id) crtc->pipe;
464 pll = &dev_priv->shared_dplls[i];
466 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
467 crtc->base.base.id, crtc->base.name,
/* Non-IBX: search the PLL pool for a match or a free slot. */
470 pll = intel_find_shared_dpll(state, crtc,
471 &crtc_state->dpll_hw_state,
479 /* reference the pll */
480 intel_reference_shared_dpll(state, crtc,
481 pll, &crtc_state->dpll_hw_state);
483 crtc_state->shared_dpll = pll;
/* Debug dump of an IBX PCH DPLL hw_state. */
488 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
489 const struct intel_dpll_hw_state *hw_state)
491 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
492 "fp0: 0x%x, fp1: 0x%x\n",
/* Hook table wiring the IBX PCH DPLL implementation into the shared-dpll core. */
499 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
500 .prepare = ibx_pch_dpll_prepare,
501 .enable = ibx_pch_dpll_enable,
502 .disable = ibx_pch_dpll_disable,
503 .get_hw_state = ibx_pch_dpll_get_hw_state,
/* HSW: program and latch the WRPLL control register from the staged state. */
506 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
507 struct intel_shared_dpll *pll)
509 const enum intel_dpll_id id = pll->info->id;
511 I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
512 POSTING_READ(WRPLL_CTL(id));
/* HSW: program the (single) SPLL control register from the staged state. */
516 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
517 struct intel_shared_dpll *pll)
519 I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
520 POSTING_READ(SPLL_CTL);
/* HSW: clear only the enable bit of the WRPLL, preserving other fields. */
524 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
525 struct intel_shared_dpll *pll)
527 const enum intel_dpll_id id = pll->info->id;
530 val = I915_READ(WRPLL_CTL(id));
531 I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
532 POSTING_READ(WRPLL_CTL(id));
/* HSW: clear only the enable bit of the SPLL, preserving other fields. */
535 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
536 struct intel_shared_dpll *pll)
540 val = I915_READ(SPLL_CTL);
541 I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
542 POSTING_READ(SPLL_CTL);
/*
 * Read back the WRPLL control register; returns whether the PLL is
 * enabled.  Skipped (returns through the missing early-exit) when the
 * display power domain is down.
 */
545 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
546 struct intel_shared_dpll *pll,
547 struct intel_dpll_hw_state *hw_state)
549 const enum intel_dpll_id id = pll->info->id;
550 intel_wakeref_t wakeref;
553 wakeref = intel_display_power_get_if_enabled(dev_priv,
554 POWER_DOMAIN_DISPLAY_CORE);
558 val = I915_READ(WRPLL_CTL(id));
559 hw_state->wrpll = val;
561 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
563 return val & WRPLL_PLL_ENABLE;
/* Same as above for the SPLL (single instance, so no id needed). */
566 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
567 struct intel_shared_dpll *pll,
568 struct intel_dpll_hw_state *hw_state)
570 intel_wakeref_t wakeref;
573 wakeref = intel_display_power_get_if_enabled(dev_priv,
574 POWER_DOMAIN_DISPLAY_CORE);
578 val = I915_READ(SPLL_CTL);
579 hw_state->spll = val;
581 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
583 return val & SPLL_PLL_ENABLE;
/* LC PLL reference frequency expressed in 2 kHz units (u64 to avoid overflow). */
587 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
593 /* Constraints for PLL good behavior */
/* Candidate (R2, N2, P) divider triple for the WRPLL search -- fields lost in extraction. */
599 struct hsw_wrpll_rnp {
/* NOTE(review): the body of this function (the per-frequency budget table) is missing here. */
603 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
/*
 * Compare a candidate (r2, n2, p) divider set against the best one found
 * so far and update @best if the candidate is preferable under the
 * PPM budget rules described below.
 */
677 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
678 unsigned int r2, unsigned int n2,
680 struct hsw_wrpll_rnp *best)
682 u64 a, b, c, d, diff, diff_best;
684 /* No best (r,n,p) yet */
693 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
697 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
700 * and we would like delta <= budget.
702 * If the discrepancy is above the PPM-based budget, always prefer to
703 * improve upon the previous solution. However, if you're within the
704 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
706 a = freq2k * budget * p * r2;
707 b = freq2k * budget * best->p * best->r2;
708 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
709 diff_best = abs_diff(freq2k * best->p * best->r2,
710 LC_FREQ_2K * best->n2);
712 d = 1000000 * diff_best;
/* a/b: scaled budgets; c/d: scaled deviations (c's assignment is missing here). */
714 if (a < c && b < d) {
715 /* If both are above the budget, pick the closer */
716 if (best->p * best->r2 * diff < p * r2 * diff_best) {
721 } else if (a >= c && b < d) {
722 /* If A is below the threshold but B is above it? Update. */
726 } else if (a >= c && b >= d) {
727 /* Both are below the limit, so pick the higher n2/(r2*r2) */
728 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
734 /* Otherwise a < c && b >= d, do nothing */
/*
 * Exhaustively search (r2, n2, p) divider combinations for the WRPLL so
 * the output matches @clock within the PPM budget; results are returned
 * through the *_out parameters.
 */
738 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
739 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
743 struct hsw_wrpll_rnp best = { 0, 0, 0 };
746 freq2k = clock / 100;
748 budget = hsw_wrpll_get_budget_for_freq(clock);
750 /* Special case handling for 540 pixel clock: bypass WR PLL entirely
751 * and directly pass the LC PLL to it. */
752 if (freq2k == 5400000) {
760 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
763 * We want R so that REF_MIN <= Ref <= REF_MAX.
764 * Injecting R2 = 2 * R gives:
765 * REF_MAX * r2 > LC_FREQ * 2 and
766 * REF_MIN * r2 < LC_FREQ * 2
768 * Which means the desired boundaries for r2 are:
769 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
772 for (r2 = LC_FREQ * 2 / REF_MAX + 1;
773 r2 <= LC_FREQ * 2 / REF_MIN;
777 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
779 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
780 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
781 * VCO_MAX * r2 > n2 * LC_FREQ and
782 * VCO_MIN * r2 < n2 * LC_FREQ)
784 * Which means the desired boundaries for n2 are:
785 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
787 for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
788 n2 <= VCO_MAX * r2 / LC_FREQ;
791 for (p = P_MIN; p <= P_MAX; p += P_INC)
792 hsw_wrpll_update_rnp(freq2k, budget,
/*
 * Compute WRPLL dividers for an HDMI crtc_state and find a matching or
 * free WRPLL (WRPLL1/WRPLL2) in the shared pool.
 */
802 static struct intel_shared_dpll *
803 hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
804 struct intel_crtc *crtc)
806 struct intel_crtc_state *crtc_state =
807 intel_atomic_get_new_crtc_state(state, crtc);
808 struct intel_shared_dpll *pll;
810 unsigned int p, n2, r2;
812 hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
814 val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
815 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
816 WRPLL_DIVIDER_POST(p);
818 crtc_state->dpll_hw_state.wrpll = val;
820 pll = intel_find_shared_dpll(state, crtc,
821 &crtc_state->dpll_hw_state,
822 DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
/*
 * For DP on HSW the fixed LCPLL is used; map the link clock to the
 * corresponding LCPLL id (no pool search needed).
 */
830 static struct intel_shared_dpll *
831 hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
833 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
834 struct intel_shared_dpll *pll;
835 enum intel_dpll_id pll_id;
836 int clock = crtc_state->port_clock;
840 pll_id = DPLL_ID_LCPLL_810;
843 pll_id = DPLL_ID_LCPLL_1350;
846 pll_id = DPLL_ID_LCPLL_2700;
849 DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
853 pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
/*
 * HSW top-level PLL selection: dispatch on output type (HDMI -> WRPLL,
 * DP -> LCPLL, analog -> SPLL), then reference the chosen PLL.
 */
861 static bool hsw_get_dpll(struct intel_atomic_state *state,
862 struct intel_crtc *crtc,
863 struct intel_encoder *encoder)
865 struct intel_crtc_state *crtc_state =
866 intel_atomic_get_new_crtc_state(state, crtc);
867 struct intel_shared_dpll *pll;
/* Start from a clean hw_state so stale fields don't leak into matching. */
869 memset(&crtc_state->dpll_hw_state, 0,
870 sizeof(crtc_state->dpll_hw_state));
872 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
873 pll = hsw_ddi_hdmi_get_dpll(state, crtc);
874 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
875 pll = hsw_ddi_dp_get_dpll(crtc_state);
876 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
/* Analog (CRT) only supports the fixed 135 MHz x2 SPLL configuration. */
877 if (WARN_ON(crtc_state->port_clock / 2 != 135000))
880 crtc_state->dpll_hw_state.spll =
881 SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
883 pll = intel_find_shared_dpll(state, crtc,
884 &crtc_state->dpll_hw_state,
885 DPLL_ID_SPLL, DPLL_ID_SPLL);
893 intel_reference_shared_dpll(state, crtc,
894 pll, &crtc_state->dpll_hw_state);
896 crtc_state->shared_dpll = pll;
/* Debug dump of a HSW WRPLL/SPLL hw_state. */
901 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
902 const struct intel_dpll_hw_state *hw_state)
904 DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
905 hw_state->wrpll, hw_state->spll);
/* Hook tables for the HSW WRPLL and SPLL (no prepare step on HSW). */
908 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
909 .enable = hsw_ddi_wrpll_enable,
910 .disable = hsw_ddi_wrpll_disable,
911 .get_hw_state = hsw_ddi_wrpll_get_hw_state,
914 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
915 .enable = hsw_ddi_spll_enable,
916 .disable = hsw_ddi_spll_disable,
917 .get_hw_state = hsw_ddi_spll_get_hw_state,
/*
 * The LCPLL is always on (it also drives CDCLK), so enable/disable are
 * intentionally empty stubs; bodies visible here are truncated anyway.
 */
920 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
921 struct intel_shared_dpll *pll)
925 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
926 struct intel_shared_dpll *pll)
930 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
931 struct intel_shared_dpll *pll,
932 struct intel_dpll_hw_state *hw_state)
937 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
938 .enable = hsw_ddi_lcpll_enable,
939 .disable = hsw_ddi_lcpll_disable,
940 .get_hw_state = hsw_ddi_lcpll_get_hw_state,
/* Per-PLL register triple (control + two config registers) for SKL. */
943 struct skl_dpll_regs {
944 i915_reg_t ctl, cfgcr1, cfgcr2;
947 /* this array is indexed by the *shared* pll id */
948 static const struct skl_dpll_regs skl_dpll_regs[4] = {
952 /* DPLL 0 doesn't support HDMI mode */
957 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
958 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
963 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
964 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
969 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
970 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
/*
 * Update this PLL's 6-bit field inside the shared DPLL_CTRL1 register
 * (each PLL owns bits [id*6, id*6+5]).
 */
974 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
975 struct intel_shared_dpll *pll)
977 const enum intel_dpll_id id = pll->info->id;
980 val = I915_READ(DPLL_CTRL1);
982 val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
984 DPLL_CTRL1_LINK_RATE_MASK(id));
985 val |= pll->state.hw_state.ctrl1 << (id * 6);
987 I915_WRITE(DPLL_CTRL1, val);
988 POSTING_READ(DPLL_CTRL1);
/* SKL: program ctrl1 + cfgcr1/cfgcr2, set the enable bit, wait for lock. */
991 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
992 struct intel_shared_dpll *pll)
994 const struct skl_dpll_regs *regs = skl_dpll_regs;
995 const enum intel_dpll_id id = pll->info->id;
997 skl_ddi_pll_write_ctrl1(dev_priv, pll);
999 I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1000 I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1001 POSTING_READ(regs[id].cfgcr1);
1002 POSTING_READ(regs[id].cfgcr2);
1004 /* the enable bit is always bit 31 */
1005 I915_WRITE(regs[id].ctl,
1006 I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
1008 if (intel_wait_for_register(&dev_priv->uncore,
1013 DRM_ERROR("DPLL %d not locked\n", id);
/* DPLL0 drives CDCLK and stays on: only its ctrl1 field is programmed. */
1016 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
1017 struct intel_shared_dpll *pll)
1019 skl_ddi_pll_write_ctrl1(dev_priv, pll);
/* Regular SKL PLLs: clear the enable bit (bit 31) to shut them down. */
1022 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1023 struct intel_shared_dpll *pll)
1025 const struct skl_dpll_regs *regs = skl_dpll_regs;
1026 const enum intel_dpll_id id = pll->info->id;
1028 /* the enable bit is always bit 31 */
1029 I915_WRITE(regs[id].ctl,
1030 I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
1031 POSTING_READ(regs[id].ctl);
/* DPLL0 is never disabled here (it drives CDCLK) -- intentional no-op. */
1034 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
1035 struct intel_shared_dpll *pll)
/*
 * Read back a SKL PLL's hardware state (ctrl1 field plus cfgcr1/2 in
 * HDMI mode).  Returns false-path early exits are truncated in this
 * extraction.
 */
1039 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1040 struct intel_shared_dpll *pll,
1041 struct intel_dpll_hw_state *hw_state)
1044 const struct skl_dpll_regs *regs = skl_dpll_regs;
1045 const enum intel_dpll_id id = pll->info->id;
1046 intel_wakeref_t wakeref;
1049 wakeref = intel_display_power_get_if_enabled(dev_priv,
1050 POWER_DOMAIN_DISPLAY_CORE);
1056 val = I915_READ(regs[id].ctl);
1057 if (!(val & LCPLL_PLL_ENABLE))
/* Extract this PLL's 6-bit slice of DPLL_CTRL1. */
1060 val = I915_READ(DPLL_CTRL1);
1061 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1063 /* avoid reading back stale values if HDMI mode is not enabled */
1064 if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1065 hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
1066 hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
1071 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/* DPLL0 variant: it must always be enabled, so a disabled PLL is a WARN. */
1076 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1077 struct intel_shared_dpll *pll,
1078 struct intel_dpll_hw_state *hw_state)
1080 const struct skl_dpll_regs *regs = skl_dpll_regs;
1081 const enum intel_dpll_id id = pll->info->id;
1082 intel_wakeref_t wakeref;
1086 wakeref = intel_display_power_get_if_enabled(dev_priv,
1087 POWER_DOMAIN_DISPLAY_CORE);
1093 /* DPLL0 is always enabled since it drives CDCLK */
1094 val = I915_READ(regs[id].ctl);
1095 if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
1098 val = I915_READ(DPLL_CTRL1);
1099 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1104 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/* Best-so-far result of the SKL WRPLL divider search. */
1109 struct skl_wrpll_context {
1110 u64 min_deviation; /* current minimal deviation */
1111 u64 central_freq; /* chosen central freq */
1112 u64 dco_freq; /* chosen dco freq */
1113 unsigned int p; /* chosen divider */
/* Reset the search: no candidate yet, so deviation starts at U64_MAX. */
1116 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1118 memset(ctx, 0, sizeof(*ctx));
1120 ctx->min_deviation = U64_MAX;
1123 /* DCO freq must be within +1%/-6% of the DCO central freq */
1124 #define SKL_DCO_MAX_PDEVIATION 100
1125 #define SKL_DCO_MAX_NDEVIATION 600
/*
 * Evaluate one candidate divider: accept it as the new best if its DCO
 * deviation from the central frequency is within the allowed bound
 * (+1% above, -6% below, in units of 0.01%) and smaller than the best
 * deviation so far.
 */
1127 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1130 unsigned int divider)
/* Deviation in units of 0.01% (hence the 10000 scale factor). */
1134 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1137 /* positive deviation */
1138 if (dco_freq >= central_freq) {
1139 if (deviation < SKL_DCO_MAX_PDEVIATION &&
1140 deviation < ctx->min_deviation) {
1141 ctx->min_deviation = deviation;
1142 ctx->central_freq = central_freq;
1143 ctx->dco_freq = dco_freq;
1146 /* negative deviation */
1147 } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1148 deviation < ctx->min_deviation) {
1149 ctx->min_deviation = deviation;
1150 ctx->central_freq = central_freq;
1151 ctx->dco_freq = dco_freq;
/*
 * Factor the total divider @p into the hardware's three cascaded
 * dividers P0 (pdiv), P1 (qdiv) and P2 (kdiv); even and odd dividers
 * have distinct factorization rules.
 */
1156 static void skl_wrpll_get_multipliers(unsigned int p,
1157 unsigned int *p0 /* out */,
1158 unsigned int *p1 /* out */,
1159 unsigned int *p2 /* out */)
/* Even divider: factor p/2 by its smallest supported component. */
1163 unsigned int half = p / 2;
1165 if (half == 1 || half == 2 || half == 3 || half == 5) {
1169 } else if (half % 2 == 0) {
1173 } else if (half % 3 == 0) {
1177 } else if (half % 7 == 0) {
/* Odd dividers are a fixed small set; each gets a hard-coded split. */
1182 } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
1186 } else if (p == 5 || p == 7) {
1190 } else if (p == 15) {
1194 } else if (p == 21) {
1198 } else if (p == 35) {
/* Register-level WRPLL parameters derived from the divider search (fields lost in extraction). */
1205 struct skl_wrpll_params {
/*
 * Translate central frequency and (p0, p1, p2) dividers into the
 * encoded register fields, including the fractional DCO ratio.
 */
1215 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1218 u32 p0, u32 p1, u32 p2)
/* Encode the DCO central frequency selector. */
1222 switch (central_freq) {
1224 params->central_freq = 0;
1227 params->central_freq = 1;
1230 params->central_freq = 3;
1247 WARN(1, "Incorrect PDiv\n");
1264 WARN(1, "Incorrect KDiv\n");
1267 params->qdiv_ratio = p1;
1268 params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1270 dco_freq = p0 * p1 * p2 * afe_clock;
1273 * Intermediate values are in Hz.
1274 * Divide by MHz to match BSpec
1276 params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
/* Fractional part in 1/2^15 MHz units (0x8000 scale), per the register format. */
1277 params->dco_fraction =
1278 div_u64((div_u64(dco_freq, 24) -
1279 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
/*
 * SKL WRPLL search: try every (divider, central frequency) combination,
 * preferring even dividers, and populate @wrpll_params from the best
 * candidate found.
 */
1283 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1284 struct skl_wrpll_params *wrpll_params)
1286 u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1287 u64 dco_central_freq[3] = { 8400000000ULL,
1290 static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
1291 24, 28, 30, 32, 36, 40, 42, 44,
1292 48, 52, 54, 56, 60, 64, 66, 68,
1293 70, 72, 76, 78, 80, 84, 88, 90,
1295 static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1296 static const struct {
1300 { even_dividers, ARRAY_SIZE(even_dividers) },
1301 { odd_dividers, ARRAY_SIZE(odd_dividers) },
1303 struct skl_wrpll_context ctx;
1304 unsigned int dco, d, i;
1305 unsigned int p0, p1, p2;
1307 skl_wrpll_context_init(&ctx);
1309 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1310 for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1311 for (i = 0; i < dividers[d].n_dividers; i++) {
1312 unsigned int p = dividers[d].list[i];
1313 u64 dco_freq = p * afe_clock;
1315 skl_wrpll_try_divider(&ctx,
1316 dco_central_freq[dco],
1320 * Skip the remaining dividers if we're sure to
1321 * have found the definitive divider, we can't
1322 * improve a 0 deviation.
1324 if (ctx.min_deviation == 0)
1325 goto skip_remaining_dividers;
1329 skip_remaining_dividers:
1331 * If a solution is found with an even divider, prefer
1334 if (d == 0 && ctx.p)
1339 DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1344 * gcc incorrectly analyses that these can be used without being
1345 * initialized. To be fair, it's hard to guess.
1348 skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1349 skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
/*
 * Build the ctrl1/cfgcr1/cfgcr2 values for HDMI from the WRPLL search
 * result and store them in the crtc_state.
 */
1355 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1357 u32 ctrl1, cfgcr1, cfgcr2;
1358 struct skl_wrpll_params wrpll_params = { 0, };
1361 * See comment in intel_dpll_hw_state to understand why we always use 0
1362 * as the DPLL id in this function.
1364 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1366 ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1368 if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1372 cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1373 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1374 wrpll_params.dco_integer;
1376 cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1377 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1378 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1379 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1380 wrpll_params.central_freq;
/* Wipe the hw_state first so PLL matching compares only what we set. */
1382 memset(&crtc_state->dpll_hw_state, 0,
1383 sizeof(crtc_state->dpll_hw_state));
1385 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1386 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1387 crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
/* Map the DP link rate (port_clock / 2) to the encoded ctrl1 link-rate field. */
1392 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1397 * See comment in intel_dpll_hw_state to understand why we always use 0
1398 * as the DPLL id in this function.
1400 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1401 switch (crtc_state->port_clock / 2) {
1403 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1406 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1409 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1413 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1416 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1419 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
/* Only ctrl1 matters for DP; clear everything else for clean matching. */
1423 memset(&crtc_state->dpll_hw_state, 0,
1424 sizeof(crtc_state->dpll_hw_state));
1426 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
/*
 * SKL top-level PLL selection: compute the hw_state for the output type,
 * then search the pool (eDP may use DPLL0, others use DPLL1-3) and
 * reference the result.
 */
1431 static bool skl_get_dpll(struct intel_atomic_state *state,
1432 struct intel_crtc *crtc,
1433 struct intel_encoder *encoder)
1435 struct intel_crtc_state *crtc_state =
1436 intel_atomic_get_new_crtc_state(state, crtc);
1437 struct intel_shared_dpll *pll;
1440 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1441 bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1443 DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
1446 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
1447 bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1449 DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
/* eDP can use DPLL0 (shared with CDCLK); other outputs search DPLL1-3. */
1456 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1457 pll = intel_find_shared_dpll(state, crtc,
1458 &crtc_state->dpll_hw_state,
1462 pll = intel_find_shared_dpll(state, crtc,
1463 &crtc_state->dpll_hw_state,
1469 intel_reference_shared_dpll(state, crtc,
1470 pll, &crtc_state->dpll_hw_state);
1472 crtc_state->shared_dpll = pll;
/* Log the SKL DPLL register values cached in @hw_state (debug aid). */
1477 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1478 const struct intel_dpll_hw_state *hw_state)
1480 DRM_DEBUG_KMS("dpll_hw_state: "
1481 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
/* Ops for SKL shared DPLLs 1-3 (DPLL0 has its own table below). */
1487 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1488 .enable = skl_ddi_pll_enable,
1489 .disable = skl_ddi_pll_disable,
1490 .get_hw_state = skl_ddi_pll_get_hw_state,
/* Ops for SKL DPLL0, which also feeds CDCLK and so is handled separately. */
1493 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1494 .enable = skl_ddi_dpll0_enable,
1495 .disable = skl_ddi_dpll0_disable,
1496 .get_hw_state = skl_ddi_dpll0_get_hw_state,
/*
 * Program and enable a BXT/GLK port PLL: select the non-SSC reference,
 * (GLK only) power up the PLL, write the divider/coefficient registers
 * from the cached hw_state, trigger a recalibration, enable the PLL and
 * wait for lock, then program the lane staggering via the group register.
 */
1499 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1500 struct intel_shared_dpll *pll)
1503 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1505 enum dpio_channel ch;
1507 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1509 /* Non-SSC reference */
1510 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1511 temp |= PORT_PLL_REF_SEL;
1512 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
/* GLK adds an explicit PLL power-enable step before programming. */
1514 if (IS_GEMINILAKE(dev_priv)) {
1515 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1516 temp |= PORT_PLL_POWER_ENABLE;
1517 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1519 if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1520 PORT_PLL_POWER_STATE), 200))
1521 DRM_ERROR("Power state not set for PLL:%d\n", port);
1524 /* Disable 10 bit clock */
1525 temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1526 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1527 I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Write P1/P2 dividers */
1530 temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1531 temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1532 temp |= pll->state.hw_state.ebb0;
1533 I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
1535 /* Write M2 integer */
1536 temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1537 temp &= ~PORT_PLL_M2_MASK;
1538 temp |= pll->state.hw_state.pll0;
1539 I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
/* Write N divider */
1542 temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1543 temp &= ~PORT_PLL_N_MASK;
1544 temp |= pll->state.hw_state.pll1;
1545 I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
1547 /* Write M2 fraction */
1548 temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1549 temp &= ~PORT_PLL_M2_FRAC_MASK;
1550 temp |= pll->state.hw_state.pll2;
1551 I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
1553 /* Write M2 fraction enable */
1554 temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1555 temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1556 temp |= pll->state.hw_state.pll3;
1557 I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
/* Write loop-filter coefficients and gain control */
1560 temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1561 temp &= ~PORT_PLL_PROP_COEFF_MASK;
1562 temp &= ~PORT_PLL_INT_COEFF_MASK;
1563 temp &= ~PORT_PLL_GAIN_CTL_MASK;
1564 temp |= pll->state.hw_state.pll6;
1565 I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
1567 /* Write calibration val */
1568 temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1569 temp &= ~PORT_PLL_TARGET_CNT_MASK;
1570 temp |= pll->state.hw_state.pll8;
1571 I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
/* Lock threshold */
1573 temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1574 temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1575 temp |= pll->state.hw_state.pll9;
1576 I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
/* DCO amplitude override/value */
1578 temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1579 temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1580 temp &= ~PORT_PLL_DCO_AMP_MASK;
1581 temp |= pll->state.hw_state.pll10;
1582 I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
1584 /* Recalibrate with new settings */
1585 temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1586 temp |= PORT_PLL_RECALIBRATE;
1587 I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Second EBB_4 write applies the cached 10-bit-clock setting. */
1588 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1589 temp |= pll->state.hw_state.ebb4;
1590 I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable the PLL and wait for it to report lock (elided timeout arg). */
1593 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1594 temp |= PORT_PLL_ENABLE;
1595 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1596 POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1598 if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1600 DRM_ERROR("PLL %d not locked\n", port);
1602 if (IS_GEMINILAKE(dev_priv)) {
1603 temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
1604 temp |= DCC_DELAY_RANGE_2;
1605 I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
1609 * While we write to the group register to program all lanes at once we
1610 * can read only lane registers and we pick lanes 0/1 for that.
1612 temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1613 temp &= ~LANE_STAGGER_MASK;
1614 temp &= ~LANESTAGGER_STRAP_OVRD;
1615 temp |= pll->state.hw_state.pcsdw12;
1616 I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
/*
 * Disable a BXT/GLK port PLL; on GLK additionally drop PLL power and
 * wait for the power state to clear.
 */
1619 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
1620 struct intel_shared_dpll *pll)
1622 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1625 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1626 temp &= ~PORT_PLL_ENABLE;
1627 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1628 POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1630 if (IS_GEMINILAKE(dev_priv)) {
1631 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1632 temp &= ~PORT_PLL_POWER_ENABLE;
1633 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1635 if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1636 PORT_PLL_POWER_STATE), 200))
1637 DRM_ERROR("Power state not reset for PLL:%d\n", port);
/*
 * Read back the current BXT port PLL configuration into @hw_state,
 * masking each register down to the bits this driver programs so the
 * result can be compared against a computed state. Returns false when
 * display power is off or the PLL is not enabled (early returns elided).
 */
1641 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1642 struct intel_shared_dpll *pll,
1643 struct intel_dpll_hw_state *hw_state)
1645 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1646 intel_wakeref_t wakeref;
1648 enum dpio_channel ch;
1652 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1654 wakeref = intel_display_power_get_if_enabled(dev_priv,
1655 POWER_DOMAIN_DISPLAY_CORE);
1661 val = I915_READ(BXT_PORT_PLL_ENABLE(port));
1662 if (!(val & PORT_PLL_ENABLE))
1665 hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1666 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
1668 hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1669 hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
1671 hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1672 hw_state->pll0 &= PORT_PLL_M2_MASK;
1674 hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1675 hw_state->pll1 &= PORT_PLL_N_MASK;
1677 hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1678 hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
1680 hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1681 hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
1683 hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1684 hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
1685 PORT_PLL_INT_COEFF_MASK |
1686 PORT_PLL_GAIN_CTL_MASK;
1688 hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1689 hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
1691 hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1692 hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
1694 hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1695 hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
1696 PORT_PLL_DCO_AMP_MASK;
1699 * While we write to the group register to program all lanes at once we
1700 * can read only lane registers. We configure all lanes the same way, so
1701 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
1703 hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1704 if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
1705 DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
1707 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
1708 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
/* Release the display-core power reference taken above. */
1713 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1718 /* bxt clock parameters */
/*
 * PLL divider set for one BXT port clock. Field declarations were elided
 * in extraction; usage below shows members clock, p1, p2, m2_int,
 * m2_frac, m2_frac_en, n and vco — TODO confirm against full file.
 */
1719 struct bxt_clk_div {
1731 /* pre-calculated values for DP linkrates */
/* Columns presumably follow struct bxt_clk_div field order — verify. */
1732 static const struct bxt_clk_div bxt_dp_clk_val[] = {
1733 {162000, 4, 2, 32, 1677722, 1, 1},
1734 {270000, 4, 1, 27, 0, 0, 1},
1735 {540000, 2, 1, 27, 0, 0, 1},
1736 {216000, 3, 2, 32, 1677722, 1, 1},
1737 {243000, 4, 1, 24, 1258291, 1, 1},
1738 {324000, 4, 1, 32, 1677722, 1, 1},
1739 {432000, 3, 1, 32, 1677722, 1, 1}
/*
 * Find PLL dividers for an HDMI port clock via the generic best-dpll
 * search, then unpack the result into @clk_div. m2 is stored as a
 * 10.22 fixed-point value, hence the >> 22 / low-22-bit split below.
 */
1743 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
1744 struct bxt_clk_div *clk_div)
1746 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1747 struct dpll best_clock;
1749 /* Calculate HDMI div */
1751 * FIXME: tie the following calculation into
1752 * i9xx_crtc_compute_clock
1754 if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
1755 DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
1756 crtc_state->port_clock,
1757 pipe_name(crtc->pipe));
1761 clk_div->p1 = best_clock.p1;
1762 clk_div->p2 = best_clock.p2;
/* The BXT PHY requires m1 == 2; anything else is a search bug. */
1763 WARN_ON(best_clock.m1 != 2);
1764 clk_div->n = best_clock.n;
1765 clk_div->m2_int = best_clock.m2 >> 22;
1766 clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
1767 clk_div->m2_frac_en = clk_div->m2_frac != 0;
1769 clk_div->vco = best_clock.vco;
/*
 * Look up pre-calculated DP dividers for the port clock; falls back to
 * the first table entry (1.62 GHz) if the clock is not found. The VCO
 * is derived as clock * 5 * p1 * p2 (written as * 10 / 2).
 */
1774 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
1775 struct bxt_clk_div *clk_div)
1777 int clock = crtc_state->port_clock;
1780 *clk_div = bxt_dp_clk_val[0];
1781 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
1782 if (bxt_dp_clk_val[i].clock == clock) {
1783 *clk_div = bxt_dp_clk_val[i];
1788 clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
/*
 * Translate divider values into the BXT port PLL register image stored
 * in crtc_state->dpll_hw_state. Loop-filter coefficients and the
 * calibration target count are chosen per VCO band; lane staggering is
 * chosen per port clock (the assignments inside the bands were elided
 * in extraction).
 */
1791 static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
1792 const struct bxt_clk_div *clk_div)
1794 struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
1795 int clock = crtc_state->port_clock;
1796 int vco = clk_div->vco;
1797 u32 prop_coef, int_coef, gain_ctl, targ_cnt;
1800 memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
1802 if (vco >= 6200000 && vco <= 6700000) {
1807 } else if ((vco > 5400000 && vco < 6200000) ||
1808 (vco >= 4800000 && vco < 5400000)) {
/* Exactly 5400000 kHz is excluded from both ranges above. */
1813 } else if (vco == 5400000) {
1819 DRM_ERROR("Invalid VCO\n");
/* Lane stagger selection thresholds (values elided in extraction). */
1825 else if (clock > 135000)
1827 else if (clock > 67000)
1829 else if (clock > 33000)
1834 dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
1835 dpll_hw_state->pll0 = clk_div->m2_int;
1836 dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
1837 dpll_hw_state->pll2 = clk_div->m2_frac;
1839 if (clk_div->m2_frac_en)
1840 dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
1842 dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
1843 dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
1845 dpll_hw_state->pll8 = targ_cnt;
1847 dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
1849 dpll_hw_state->pll10 =
1850 PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
1851 | PORT_PLL_DCO_AMP_OVR_EN_H;
1853 dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
1855 dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
/* DP entry point: look up pre-computed dividers, then build the HW state. */
1861 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1863 struct bxt_clk_div clk_div = {};
1865 bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
1867 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
/* HDMI entry point: compute dividers by search, then build the HW state. */
1871 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1873 struct bxt_clk_div clk_div = {};
1875 bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
1877 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
/*
 * Reserve the PLL for this CRTC on BXT. There is no PLL pool here:
 * ports map 1:1 onto PLLs, so the PLL is picked directly by port id
 * after the HW state has been computed.
 */
1880 static bool bxt_get_dpll(struct intel_atomic_state *state,
1881 struct intel_crtc *crtc,
1882 struct intel_encoder *encoder)
1884 struct intel_crtc_state *crtc_state =
1885 intel_atomic_get_new_crtc_state(state, crtc);
1886 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1887 struct intel_shared_dpll *pll;
1888 enum intel_dpll_id id;
1890 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
1891 !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
1894 if (intel_crtc_has_dp_encoder(crtc_state) &&
1895 !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
1898 /* 1:1 mapping between ports and PLLs */
1899 id = (enum intel_dpll_id) encoder->port;
1900 pll = intel_get_shared_dpll_by_id(dev_priv, id);
1902 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
1903 crtc->base.base.id, crtc->base.name, pll->info->name);
1905 intel_reference_shared_dpll(state, crtc,
1906 pll, &crtc_state->dpll_hw_state);
1908 crtc_state->shared_dpll = pll;
1913 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
1914 const struct intel_dpll_hw_state *hw_state)
1916 DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
1917 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
1918 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
/* Ops shared by all BXT/GLK port PLLs. */
1932 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1933 .enable = bxt_ddi_pll_enable,
1934 .disable = bxt_ddi_pll_disable,
1935 .get_hw_state = bxt_ddi_pll_get_hw_state,
/*
 * Per-platform DPLL manager: the platform's PLL table plus hooks to
 * reserve (get), release (put), switch (update_active) and dump PLLs.
 */
1938 struct intel_dpll_mgr {
1939 const struct dpll_info *dpll_info;
/* Reserve PLL(s) for @crtc/@encoder; false on failure. */
1941 bool (*get_dplls)(struct intel_atomic_state *state,
1942 struct intel_crtc *crtc,
1943 struct intel_encoder *encoder);
/* Release PLL(s) previously reserved for @crtc. */
1944 void (*put_dplls)(struct intel_atomic_state *state,
1945 struct intel_crtc *crtc);
/* Optional: switch the active PLL for @crtc (TC/TBT platforms). */
1946 void (*update_active_dpll)(struct intel_atomic_state *state,
1947 struct intel_crtc *crtc,
1948 struct intel_encoder *encoder);
/* Debug dump of a platform-specific hw_state. */
1949 void (*dump_hw_state)(struct drm_i915_private *dev_priv,
1950 const struct intel_dpll_hw_state *hw_state);
/* IBX/CPT PCH: two interchangeable DPLLs. */
1953 static const struct dpll_info pch_plls[] = {
1954 { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
1955 { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
/* PCH platform manager; uses the generic put helper. */
1959 static const struct intel_dpll_mgr pch_pll_mgr = {
1960 .dpll_info = pch_plls,
1961 .get_dplls = ibx_get_dpll,
1962 .put_dplls = intel_put_dpll,
1963 .dump_hw_state = ibx_dump_hw_state,
/* HSW/BDW: two WRPLLs, the SPLL, and three fixed always-on LCPLLs. */
1966 static const struct dpll_info hsw_plls[] = {
1967 { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
1968 { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
1969 { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
1970 { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
1971 { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1972 { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
/* HSW/BDW platform manager. */
1976 static const struct intel_dpll_mgr hsw_pll_mgr = {
1977 .dpll_info = hsw_plls,
1978 .get_dplls = hsw_get_dpll,
1979 .put_dplls = intel_put_dpll,
1980 .dump_hw_state = hsw_dump_hw_state,
/* SKL: DPLL0 is always on (it also drives CDCLK); 1-3 are allocatable. */
1983 static const struct dpll_info skl_plls[] = {
1984 { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1985 { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
1986 { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
1987 { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
/* SKL platform manager. */
1991 static const struct intel_dpll_mgr skl_pll_mgr = {
1992 .dpll_info = skl_plls,
1993 .get_dplls = skl_get_dpll,
1994 .put_dplls = intel_put_dpll,
1995 .dump_hw_state = skl_dump_hw_state,
/* BXT: one PLL per port (A-C); reuses the SKL DPLL id space. */
1998 static const struct dpll_info bxt_plls[] = {
1999 { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2000 { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2001 { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
/* BXT/GLK platform manager. */
2005 static const struct intel_dpll_mgr bxt_pll_mgr = {
2006 .dpll_info = bxt_plls,
2007 .get_dplls = bxt_get_dpll,
2008 .put_dplls = intel_put_dpll,
2009 .dump_hw_state = bxt_dump_hw_state,
/*
 * Enable a CNL DPLL following the bspec sequence: power up, program
 * CFGCR0 (and CFGCR1 only in HDMI mode), enable, wait for lock. DVFS
 * steps are handled by the cdclk code, not here.
 */
2012 static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
2013 struct intel_shared_dpll *pll)
2015 const enum intel_dpll_id id = pll->info->id;
2018 /* 1. Enable DPLL power in DPLL_ENABLE. */
2019 val = I915_READ(CNL_DPLL_ENABLE(id));
2020 val |= PLL_POWER_ENABLE;
2021 I915_WRITE(CNL_DPLL_ENABLE(id), val);
2023 /* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
2024 if (intel_wait_for_register(&dev_priv->uncore,
2025 CNL_DPLL_ENABLE(id),
2029 DRM_ERROR("PLL %d Power not enabled\n", id);
2032 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
2033 * select DP mode, and set DP link rate.
2035 val = pll->state.hw_state.cfgcr0;
2036 I915_WRITE(CNL_DPLL_CFGCR0(id), val);
2038 /* 4. Read back to ensure writes completed */
2039 POSTING_READ(CNL_DPLL_CFGCR0(id));
2041 /* Configure DPLL_CFGCR1 (HDMI dividers). */
2042 /* Avoid touch CFGCR1 if HDMI mode is not enabled */
2043 if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
2044 val = pll->state.hw_state.cfgcr1;
2045 I915_WRITE(CNL_DPLL_CFGCR1(id), val);
2046 /* Read back to ensure writes completed */
2047 POSTING_READ(CNL_DPLL_CFGCR1(id));
2051 * 5. If the frequency will result in a change to the voltage
2052 * requirement, follow the Display Voltage Frequency Switching
2053 * Sequence Before Frequency Change
2055 * Note: DVFS is actually handled via the cdclk code paths,
2056 * hence we do nothing here.
2059 /* 6. Enable DPLL in DPLL_ENABLE. */
2060 val = I915_READ(CNL_DPLL_ENABLE(id));
2062 I915_WRITE(CNL_DPLL_ENABLE(id), val);
2064 /* 7. Wait for PLL lock status in DPLL_ENABLE. */
2065 if (intel_wait_for_register(&dev_priv->uncore,
2066 CNL_DPLL_ENABLE(id),
2070 DRM_ERROR("PLL %d not locked\n", id);
2073 * 8. If the frequency will result in a change to the voltage
2074 * requirement, follow the Display Voltage Frequency Switching
2075 * Sequence After Frequency Change
2077 * Note: DVFS is actually handled via the cdclk code paths,
2078 * hence we do nothing here.
2082 * 9. turn on the clock for the DDI and map the DPLL to the DDI
2083 * Done at intel_ddi_clk_select
/*
 * Disable a CNL DPLL (reverse of the enable sequence): disable, wait
 * for unlock, drop PLL power, wait for power-off. Clock ungating and
 * DVFS steps happen elsewhere, as the inline comments note.
 */
2087 static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
2088 struct intel_shared_dpll *pll)
2090 const enum intel_dpll_id id = pll->info->id;
2094 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
2095 * Done at intel_ddi_post_disable
2099 * 2. If the frequency will result in a change to the voltage
2100 * requirement, follow the Display Voltage Frequency Switching
2101 * Sequence Before Frequency Change
2103 * Note: DVFS is actually handled via the cdclk code paths,
2104 * hence we do nothing here.
2107 /* 3. Disable DPLL through DPLL_ENABLE. */
2108 val = I915_READ(CNL_DPLL_ENABLE(id));
2110 I915_WRITE(CNL_DPLL_ENABLE(id), val);
2112 /* 4. Wait for PLL not locked status in DPLL_ENABLE. */
2113 if (intel_wait_for_register(&dev_priv->uncore,
2114 CNL_DPLL_ENABLE(id),
2118 DRM_ERROR("PLL %d locked\n", id);
2121 * 5. If the frequency will result in a change to the voltage
2122 * requirement, follow the Display Voltage Frequency Switching
2123 * Sequence After Frequency Change
2125 * Note: DVFS is actually handled via the cdclk code paths,
2126 * hence we do nothing here.
2129 /* 6. Disable DPLL power in DPLL_ENABLE. */
2130 val = I915_READ(CNL_DPLL_ENABLE(id));
2131 val &= ~PLL_POWER_ENABLE;
2132 I915_WRITE(CNL_DPLL_ENABLE(id), val);
2134 /* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
2135 if (intel_wait_for_register(&dev_priv->uncore,
2136 CNL_DPLL_ENABLE(id),
2140 DRM_ERROR("PLL %d Power not disabled\n", id);
/*
 * Read back a CNL DPLL's CFGCR0 (and CFGCR1 in HDMI mode) into
 * @hw_state. Returns false if display power is off or the PLL is
 * disabled (early returns elided in extraction).
 */
2143 static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2144 struct intel_shared_dpll *pll,
2145 struct intel_dpll_hw_state *hw_state)
2147 const enum intel_dpll_id id = pll->info->id;
2148 intel_wakeref_t wakeref;
2152 wakeref = intel_display_power_get_if_enabled(dev_priv,
2153 POWER_DOMAIN_DISPLAY_CORE);
2159 val = I915_READ(CNL_DPLL_ENABLE(id));
2160 if (!(val & PLL_ENABLE))
2163 val = I915_READ(CNL_DPLL_CFGCR0(id));
2164 hw_state->cfgcr0 = val;
2166 /* avoid reading back stale values if HDMI mode is not enabled */
2167 if (val & DPLL_CFGCR0_HDMI_MODE) {
2168 hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
/* Release the display-core power reference taken above. */
2173 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * Decompose an overall WRPLL divider into the P/Q/K multiplier triple
 * the hardware takes. Even dividers are factored through 4/6/10/14
 * (kdiv assignments elided in extraction); odd dividers use kdiv = 1.
 */
2178 static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2179 int *qdiv, int *kdiv)
/* Even divider: try k=2 factorizations in order of preference. */
2182 if (bestdiv % 2 == 0) {
2187 } else if (bestdiv % 4 == 0) {
2189 *qdiv = bestdiv / 4;
2191 } else if (bestdiv % 6 == 0) {
2193 *qdiv = bestdiv / 6;
2195 } else if (bestdiv % 5 == 0) {
2197 *qdiv = bestdiv / 10;
2199 } else if (bestdiv % 14 == 0) {
2201 *qdiv = bestdiv / 14;
/* Odd divider: small primes map directly to pdiv. */
2205 if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2209 } else { /* 9, 15, 21 */
2210 *pdiv = bestdiv / 3;
/*
 * Encode pdiv/qdiv/kdiv and the DCO frequency into skl_wrpll_params.
 * The k/p switch encodings were elided in extraction. The DCO ratio is
 * computed in 17.15 fixed point (<< 15, then split below).
 */
2217 static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2218 u32 dco_freq, u32 ref_freq,
2219 int pdiv, int qdiv, int kdiv)
2234 WARN(1, "Incorrect KDiv\n");
2251 WARN(1, "Incorrect PDiv\n");
/* qdiv != 1 is only meaningful when kdiv == 2. */
2254 WARN_ON(kdiv != 2 && qdiv != 1);
2256 params->qdiv_ratio = qdiv;
2257 params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2259 dco = div_u64((u64)dco_freq << 15, ref_freq);
2261 params->dco_integer = dco >> 15;
2262 params->dco_fraction = dco & 0x7fff;
/*
 * Reference clock for HDMI WRPLL calculations, derived from the cdclk
 * reference. Return statements were elided; the ICL+ 38.4 MHz case
 * halves the reference per the bspec note below.
 */
2265 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
2267 int ref_clock = dev_priv->cdclk.hw.ref;
2270 * For ICL+, the spec states: if reference frequency is 38.4,
2271 * use 19.2 because the DPLL automatically divides that by 2.
2273 if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
/*
 * Find the WRPLL divider whose DCO frequency (afe_clock * divider)
 * lands closest to the middle of the [7.998, 10.0] GHz DCO range,
 * then derive the P/Q/K multipliers and the fixed-point DCO ratio.
 */
2280 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2281 struct skl_wrpll_params *wrpll_params)
2283 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
/* AFE clock = symbol clock * 5. */
2284 u32 afe_clock = crtc_state->port_clock * 5;
2286 u32 dco_min = 7998000;
2287 u32 dco_max = 10000000;
2288 u32 dco_mid = (dco_min + dco_max) / 2;
/* Candidate dividers: even values first, then the allowed odd ones. */
2289 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2290 18, 20, 24, 28, 30, 32, 36, 40,
2291 42, 44, 48, 50, 52, 54, 56, 60,
2292 64, 66, 68, 70, 72, 76, 78, 80,
2293 84, 88, 90, 92, 96, 98, 100, 102,
2294 3, 5, 7, 9, 15, 21 };
2295 u32 dco, best_dco = 0, dco_centrality = 0;
2296 u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2297 int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2299 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2300 dco = afe_clock * dividers[d];
2302 if ((dco <= dco_max) && (dco >= dco_min)) {
2303 dco_centrality = abs(dco - dco_mid);
2305 if (dco_centrality < best_dco_centrality) {
2306 best_dco_centrality = dco_centrality;
2307 best_div = dividers[d];
2316 cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2318 ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
2320 cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
/*
 * Build the CNL HDMI-mode DPLL state: CFGCR0 carries the HDMI-mode
 * flag and the DCO ratio, CFGCR1 the Q/K/P dividers and central freq.
 */
2326 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2329 struct skl_wrpll_params wrpll_params = { 0, };
2331 cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2333 if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2336 cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2337 wrpll_params.dco_integer;
2339 cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2340 DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2341 DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2342 DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2343 DPLL_CFGCR1_CENTRAL_FREQ;
2345 memset(&crtc_state->dpll_hw_state, 0,
2346 sizeof(crtc_state->dpll_hw_state));
2348 crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2349 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
/*
 * Build the CNL DP-mode DPLL state: SSC plus a fixed link rate chosen
 * from port_clock / 2 (the switch case labels were elided in
 * extraction of this chunk).
 */
2354 cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2358 cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2360 switch (crtc_state->port_clock / 2) {
2362 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2365 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2368 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2372 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2375 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2378 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2381 /* Some SKUs may require elevated I/O voltage to support this */
2382 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2385 /* Some SKUs may require elevated I/O voltage to support this */
2386 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2390 memset(&crtc_state->dpll_hw_state, 0,
2391 sizeof(crtc_state->dpll_hw_state));
2393 crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
/*
 * Compute the DPLL state for the new crtc_state and reserve a matching
 * PLL from the pool (search range arguments elided in extraction).
 */
2398 static bool cnl_get_dpll(struct intel_atomic_state *state,
2399 struct intel_crtc *crtc,
2400 struct intel_encoder *encoder)
2402 struct intel_crtc_state *crtc_state =
2403 intel_atomic_get_new_crtc_state(state, crtc);
2404 struct intel_shared_dpll *pll;
2407 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2408 bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
2410 DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
2413 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
2414 bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
2416 DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
2420 DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
2421 crtc_state->output_types);
2425 pll = intel_find_shared_dpll(state, crtc,
2426 &crtc_state->dpll_hw_state,
2430 DRM_DEBUG_KMS("No PLL selected\n");
2434 intel_reference_shared_dpll(state, crtc,
2435 pll, &crtc_state->dpll_hw_state);
2437 crtc_state->shared_dpll = pll;
/* Log the CNL DPLL register values cached in @hw_state (debug aid). */
2442 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
2443 const struct intel_dpll_hw_state *hw_state)
2445 DRM_DEBUG_KMS("dpll_hw_state: "
2446 "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
/* Ops shared by all CNL DPLLs. */
2451 static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
2452 .enable = cnl_ddi_pll_enable,
2453 .disable = cnl_ddi_pll_disable,
2454 .get_hw_state = cnl_ddi_pll_get_hw_state,
/* CNL: three interchangeable DPLLs; reuses the SKL DPLL id space. */
2457 static const struct dpll_info cnl_plls[] = {
2458 { "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2459 { "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2460 { "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
/* CNL platform manager. */
2464 static const struct intel_dpll_mgr cnl_pll_mgr = {
2465 .dpll_info = cnl_plls,
2466 .get_dplls = cnl_get_dpll,
2467 .put_dplls = intel_put_dpll,
2468 .dump_hw_state = cnl_dump_hw_state,
/*
 * Pre-computed combo-PHY PLL entry: a WRPLL parameter set keyed by port
 * clock (the clock field declaration was elided in extraction — the
 * lookup in icl_calc_dp_combo_pll reads params[i].clock).
 */
2471 struct icl_combo_pll_params {
2473 struct skl_wrpll_params wrpll;
2477 * These values are already adjusted: they're the bits we write to the
2478 * registers, not the logical values.
/* DP combo-PHY PLL parameters for a 24 MHz reference clock. */
2480 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2482 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
2483 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2485 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
2486 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2488 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
2489 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2491 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
2492 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2494 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
2495 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2497 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
2498 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2500 { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
2501 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2503 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
2504 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2508 /* Also used for 38.4 MHz values. */
/* DP combo-PHY PLL parameters for a 19.2 MHz reference clock. */
2509 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2511 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
2512 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2514 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
2515 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2517 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
2518 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2520 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
2521 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2523 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
2524 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2526 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
2527 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2529 { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
2530 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2532 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
2533 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
/* Fixed TBT PLL parameters for a 24 MHz reference. */
2536 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2537 .dco_integer = 0x151, .dco_fraction = 0x4000,
2538 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
/* Fixed TBT PLL parameters for a 19.2 (or 38.4) MHz reference. */
2541 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2542 .dco_integer = 0x1A5, .dco_fraction = 0x7000,
2543 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
/*
 * Look up the pre-computed combo-PHY WRPLL parameters for the DP port
 * clock, choosing the 24 MHz or 19.2/38.4 MHz table by cdclk reference.
 * The loop bound uses the 24 MHz table's size for both tables, which
 * relies on the two tables having the same number of entries.
 */
2546 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2547 struct skl_wrpll_params *pll_params)
2549 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2550 const struct icl_combo_pll_params *params =
2551 dev_priv->cdclk.hw.ref == 24000 ?
2552 icl_dp_combo_pll_24MHz_values :
2553 icl_dp_combo_pll_19_2MHz_values;
2554 int clock = crtc_state->port_clock;
2557 for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2558 if (clock == params[i].clock) {
2559 *pll_params = params[i].wrpll;
2564 MISSING_CASE(clock);
/* Copy the fixed TBT PLL parameters matching the cdclk reference. */
2568 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2569 struct skl_wrpll_params *pll_params)
2571 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2573 *pll_params = dev_priv->cdclk.hw.ref == 24000 ?
2574 icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
/*
 * Compute the ICL combo/TBT PLL state for @crtc_state: pick the WRPLL
 * parameter source (TBT table for type-C ports, CNL WRPLL math for
 * HDMI/DSI, DP combo table otherwise) and pack it into cfgcr0/cfgcr1.
 */
2578 static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
2579 struct intel_encoder *encoder,
2580 struct intel_dpll_hw_state *pll_state)
2582 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2584 struct skl_wrpll_params pll_params = { 0 };
2587 if (intel_port_is_tc(dev_priv, encoder->port))
2588 ret = icl_calc_tbt_pll(crtc_state, &pll_params);
2589 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
2590 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
2591 ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
2593 ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
2598 cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
2599 pll_params.dco_integer;
2601 cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
2602 DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
2603 DPLL_CFGCR1_KDIV(pll_params.kdiv) |
2604 DPLL_CFGCR1_PDIV(pll_params.pdiv) |
2605 DPLL_CFGCR1_CENTRAL_FREQ_8400;
2607 memset(pll_state, 0, sizeof(*pll_state));
2609 pll_state->cfgcr0 = cfgcr0;
2610 pll_state->cfgcr1 = cfgcr1;
/* Map an MG PLL id back to its type-C port index (inverse of below). */
2616 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
2618 return id - DPLL_ID_ICL_MGPLL1;
/* Map a type-C port index to its dedicated MG PLL id. */
2621 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
2623 return tc_port + DPLL_ID_ICL_MGPLL1;
/*
 * Search div1 x div2 divider pairs whose DCO (div1*div2*clock*5) falls
 * in the valid range — a single fixed frequency for DP, a window for
 * HDMI — and fill in the MG PHY clock-topology registers for the first
 * hit. Several interior assignments (tlinedrv, the div1->hsdiv switch
 * labels) were elided in extraction.
 */
2626 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2627 u32 *target_dco_khz,
2628 struct intel_dpll_hw_state *state)
2630 u32 dco_min_freq, dco_max_freq;
2631 int div1_vals[] = {7, 5, 3, 2};
/* DP pins the DCO to exactly 8.1 GHz; HDMI allows a band. */
2635 dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2636 dco_max_freq = is_dp ? 8100000 : 10000000;
2638 for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2639 int div1 = div1_vals[i];
2641 for (div2 = 10; div2 > 0; div2--) {
2642 int dco = div1 * div2 * clock_khz * 5;
2643 int a_divratio, tlinedrv, inputsel;
2646 if (dco < dco_min_freq || dco > dco_max_freq)
2650 a_divratio = is_dp ? 10 : 5;
2656 inputsel = is_dp ? 0 : 1;
/* div1 selects the HSDIV ratio encoding. */
2663 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2666 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2669 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2672 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2676 *target_dco_khz = dco;
2678 state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2680 state->mg_clktop2_coreclkctl1 =
2681 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2683 state->mg_clktop2_hsclkctl =
2684 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2685 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2687 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
/*
 * Calculate the MG PHY PLL dividers and register images for @crtc_state.
 *
 * The specification for this function uses real numbers, so the math had to be
 * adapted to integer-only calculation, that's why it looks so different.
 *
 * NOTE(review): multiple statements (m1div initialisation, failure returns,
 * the refclk switch cases filling iref_*, the loop-filter coefficient
 * branches and parts of the SSC math) appear to have been lost in
 * extraction — restore from upstream before building.
 */
static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
				  struct intel_dpll_hw_state *pll_state)
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	int refclk_khz = dev_priv->cdclk.hw.ref;	/* reference clock in kHz */
	int clock = crtc_state->port_clock;
	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
	u32 iref_ndiv, iref_trim, iref_pulse_w;
	u32 prop_coeff, int_coeff;
	u32 tdc_targetcnt, feedfwgain;
	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
	bool use_ssc = false;
	/* Everything that is not HDMI is treated as DP for divider purposes. */
	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);

	memset(pll_state, 0, sizeof(*pll_state));

	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
		DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);

	/* Split the feedback divider into integer and fractional parts. */
	m2div_int = dco_khz / (refclk_khz * m1div);
	if (m2div_int > 255) {
		/* Retry with a larger m1 divider; 255 is the field maximum. */
		m2div_int = dco_khz / (refclk_khz * m1div);
		if (m2div_int > 255) {
			DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",

	m2div_rem = dco_khz % (refclk_khz * m1div);

	/* The fractional part is a 22-bit fixed-point value. */
	tmp = (u64)m2div_rem * (1 << 22);
	do_div(tmp, refclk_khz * m1div);

	/* iref_* parameters depend on the reference clock frequency. */
	switch (refclk_khz) {
	MISSING_CASE(refclk_khz);

	/*
	 * tdc_res = 0.000003
	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
	 *
	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
	 * was supposed to be a division, but we rearranged the operations of
	 * the formula to avoid early divisions so we don't multiply the
	 * rounding errors.
	 *
	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
	 * we also rearrange to work with integers.
	 *
	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
	 * last division by 10.
	 */
	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;

	/*
	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
	 * 32 bits. That's not a problem since we round the division down
	 * anyway.
	 */
	feedfwgain = (use_ssc || m2div_rem > 0) ?
		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;

	/* Loop-filter coefficients depend on the DCO band. */
	if (dco_khz >= 9000000) {

	/* SSC step size/length; only meaningful when use_ssc is set. */
	tmp = mul_u32_u32(dco_khz, 47 * 32);
	do_div(tmp, refclk_khz * m1div * 10000);

	tmp = mul_u32_u32(dco_khz, 1000);
	ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);

	/* Pack the computed values into the MG PLL register images. */
	pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
				 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
				 MG_PLL_DIV0_FBDIV_INT(m2div_int);

	pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
				 MG_PLL_DIV1_DITHER_DIV_2 |
				 MG_PLL_DIV1_NDIVRATIO(1) |
				 MG_PLL_DIV1_FBPREDIV(m1div);

	pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
			       MG_PLL_LF_AFCCNTSEL_512 |
			       MG_PLL_LF_GAINCTRL(1) |
			       MG_PLL_LF_INT_COEFF(int_coeff) |
			       MG_PLL_LF_PROP_COEFF(prop_coeff);

	pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
				      MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
				      MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
				      MG_PLL_FRAC_LOCK_DCODITHEREN |
				      MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
	/* Feed-forward calibration is only needed with a fractional divider. */
	if (use_ssc || m2div_rem > 0)
		pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;

	pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
				MG_PLL_SSC_TYPE(2) |
				MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
				MG_PLL_SSC_STEPNUM(ssc_steplog) |
				MG_PLL_SSC_STEPSIZE(ssc_stepsize);

	pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
					    MG_PLL_TDC_COLDST_IREFINT_EN |
					    MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
					    MG_PLL_TDC_TDCOVCCORR_EN |
					    MG_PLL_TDC_TDCSEL(3);

	pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
				 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
				 MG_PLL_BIAS_BIAS_BONUS(10) |
				 MG_PLL_BIAS_BIASCAL_EN |
				 MG_PLL_BIAS_CTRIM(12) |
				 MG_PLL_BIAS_VREF_RDAC(4) |
				 MG_PLL_BIAS_IREFTRIM(iref_trim);

	/*
	 * With 38.4 MHz refclk only part of these registers gets programmed,
	 * so restrict both the value and the RMW mask used at write time.
	 */
	if (refclk_khz == 38400) {
		pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		pll_state->mg_pll_bias_mask = 0;
		pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
		pll_state->mg_pll_bias_mask = -1U;

	pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
	pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
2866 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
2867 * @crtc_state: state for the CRTC to select the DPLL for
2868 * @port_dpll_id: the active @port_dpll_id to select
 * Select the given @port_dpll_id instance from the DPLLs reserved for the
 * given CRTC.
 */
2873 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
2874 enum icl_port_dpll_id port_dpll_id)
2876 struct icl_port_dpll *port_dpll =
2877 &crtc_state->icl_port_dplls[port_dpll_id];
2879 crtc_state->shared_dpll = port_dpll->pll;
2880 crtc_state->dpll_hw_state = port_dpll->hw_state;
2883 static void icl_update_active_dpll(struct intel_atomic_state *state,
2884 struct intel_crtc *crtc,
2885 struct intel_encoder *encoder)
2887 struct intel_crtc_state *crtc_state =
2888 intel_atomic_get_new_crtc_state(state, crtc);
2889 struct intel_digital_port *primary_port;
2890 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
2892 primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
2893 enc_to_mst(&encoder->base)->primary :
2894 enc_to_dig_port(&encoder->base);
2897 (primary_port->tc_mode == TC_PORT_DP_ALT ||
2898 primary_port->tc_mode == TC_PORT_LEGACY))
2899 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
2901 icl_set_active_port_dpll(crtc_state, port_dpll_id);
2904 static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
2905 struct intel_crtc *crtc,
2906 struct intel_encoder *encoder)
2908 struct intel_crtc_state *crtc_state =
2909 intel_atomic_get_new_crtc_state(state, crtc);
2910 struct icl_port_dpll *port_dpll =
2911 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
2912 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2913 enum port port = encoder->port;
2914 bool has_dpll4 = false;
2916 if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
2917 DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
2922 if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
2925 port_dpll->pll = intel_find_shared_dpll(state, crtc,
2926 &port_dpll->hw_state,
2928 has_dpll4 ? DPLL_ID_EHL_DPLL4
2929 : DPLL_ID_ICL_DPLL1);
2930 if (!port_dpll->pll) {
2931 DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n",
2932 port_name(encoder->port));
2936 intel_reference_shared_dpll(state, crtc,
2937 port_dpll->pll, &port_dpll->hw_state);
2939 icl_update_active_dpll(state, crtc, encoder);
2944 static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
2945 struct intel_crtc *crtc,
2946 struct intel_encoder *encoder)
2948 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2949 struct intel_crtc_state *crtc_state =
2950 intel_atomic_get_new_crtc_state(state, crtc);
2951 struct icl_port_dpll *port_dpll;
2952 enum intel_dpll_id dpll_id;
2954 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
2955 if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
2956 DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n");
2960 port_dpll->pll = intel_find_shared_dpll(state, crtc,
2961 &port_dpll->hw_state,
2963 DPLL_ID_ICL_TBTPLL);
2964 if (!port_dpll->pll) {
2965 DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
2968 intel_reference_shared_dpll(state, crtc,
2969 port_dpll->pll, &port_dpll->hw_state);
2972 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
2973 if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
2974 DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n");
2975 goto err_unreference_tbt_pll;
2978 dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
2980 port_dpll->pll = intel_find_shared_dpll(state, crtc,
2981 &port_dpll->hw_state,
2984 if (!port_dpll->pll) {
2985 DRM_DEBUG_KMS("No MG PHY PLL found\n");
2986 goto err_unreference_tbt_pll;
2988 intel_reference_shared_dpll(state, crtc,
2989 port_dpll->pll, &port_dpll->hw_state);
2991 icl_update_active_dpll(state, crtc, encoder);
2995 err_unreference_tbt_pll:
2996 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
2997 intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3002 static bool icl_get_dplls(struct intel_atomic_state *state,
3003 struct intel_crtc *crtc,
3004 struct intel_encoder *encoder)
3006 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3007 enum port port = encoder->port;
3009 if (intel_port_is_combophy(dev_priv, port))
3010 return icl_get_combo_phy_dpll(state, crtc, encoder);
3011 else if (intel_port_is_tc(dev_priv, port))
3012 return icl_get_tc_phy_dplls(state, crtc, encoder);
3019 static void icl_put_dplls(struct intel_atomic_state *state,
3020 struct intel_crtc *crtc)
3022 const struct intel_crtc_state *old_crtc_state =
3023 intel_atomic_get_old_crtc_state(state, crtc);
3024 struct intel_crtc_state *new_crtc_state =
3025 intel_atomic_get_new_crtc_state(state, crtc);
3026 enum icl_port_dpll_id id;
3028 new_crtc_state->shared_dpll = NULL;
3030 for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3031 const struct icl_port_dpll *old_port_dpll =
3032 &old_crtc_state->icl_port_dplls[id];
3033 struct icl_port_dpll *new_port_dpll =
3034 &new_crtc_state->icl_port_dplls[id];
3036 new_port_dpll->pll = NULL;
3038 if (!old_port_dpll->pll)
3041 intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3045 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
3046 struct intel_shared_dpll *pll,
3047 struct intel_dpll_hw_state *hw_state)
3049 const enum intel_dpll_id id = pll->info->id;
3050 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3051 intel_wakeref_t wakeref;
3055 wakeref = intel_display_power_get_if_enabled(dev_priv,
3056 POWER_DOMAIN_DISPLAY_CORE);
3060 val = I915_READ(MG_PLL_ENABLE(tc_port));
3061 if (!(val & PLL_ENABLE))
3064 hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
3065 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3067 hw_state->mg_clktop2_coreclkctl1 =
3068 I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
3069 hw_state->mg_clktop2_coreclkctl1 &=
3070 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3072 hw_state->mg_clktop2_hsclkctl =
3073 I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
3074 hw_state->mg_clktop2_hsclkctl &=
3075 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3076 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3077 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3078 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3080 hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
3081 hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
3082 hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
3083 hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
3084 hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
3086 hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
3087 hw_state->mg_pll_tdc_coldst_bias =
3088 I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3090 if (dev_priv->cdclk.hw.ref == 38400) {
3091 hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3092 hw_state->mg_pll_bias_mask = 0;
3094 hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3095 hw_state->mg_pll_bias_mask = -1U;
3098 hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3099 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3103 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3107 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3108 struct intel_shared_dpll *pll,
3109 struct intel_dpll_hw_state *hw_state,
3110 i915_reg_t enable_reg)
3112 const enum intel_dpll_id id = pll->info->id;
3113 intel_wakeref_t wakeref;
3117 wakeref = intel_display_power_get_if_enabled(dev_priv,
3118 POWER_DOMAIN_DISPLAY_CORE);
3122 val = I915_READ(enable_reg);
3123 if (!(val & PLL_ENABLE))
3126 hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
3127 hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
3131 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3135 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3136 struct intel_shared_dpll *pll,
3137 struct intel_dpll_hw_state *hw_state)
3139 i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3141 if (IS_ELKHARTLAKE(dev_priv) &&
3142 pll->info->id == DPLL_ID_EHL_DPLL4) {
3143 enable_reg = MG_PLL_ENABLE(0);
3146 return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3149 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3150 struct intel_shared_dpll *pll,
3151 struct intel_dpll_hw_state *hw_state)
3153 return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3156 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3157 struct intel_shared_dpll *pll)
3159 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3160 const enum intel_dpll_id id = pll->info->id;
3162 I915_WRITE(ICL_DPLL_CFGCR0(id), hw_state->cfgcr0);
3163 I915_WRITE(ICL_DPLL_CFGCR1(id), hw_state->cfgcr1);
3164 POSTING_READ(ICL_DPLL_CFGCR1(id));
3167 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3168 struct intel_shared_dpll *pll)
3170 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3171 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3175 * Some of the following registers have reserved fields, so program
3176 * these with RMW based on a mask. The mask can be fixed or generated
3177 * during the calc/readout phase if the mask depends on some other HW
3178 * state like refclk, see icl_calc_mg_pll_state().
3180 val = I915_READ(MG_REFCLKIN_CTL(tc_port));
3181 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3182 val |= hw_state->mg_refclkin_ctl;
3183 I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
3185 val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
3186 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3187 val |= hw_state->mg_clktop2_coreclkctl1;
3188 I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
3190 val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
3191 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3192 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3193 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3194 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3195 val |= hw_state->mg_clktop2_hsclkctl;
3196 I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
3198 I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3199 I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3200 I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3201 I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
3202 I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3204 val = I915_READ(MG_PLL_BIAS(tc_port));
3205 val &= ~hw_state->mg_pll_bias_mask;
3206 val |= hw_state->mg_pll_bias;
3207 I915_WRITE(MG_PLL_BIAS(tc_port), val);
3209 val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3210 val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
3211 val |= hw_state->mg_pll_tdc_coldst_bias;
3212 I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
3214 POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3217 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3218 struct intel_shared_dpll *pll,
3219 i915_reg_t enable_reg)
3223 val = I915_READ(enable_reg);
3224 val |= PLL_POWER_ENABLE;
3225 I915_WRITE(enable_reg, val);
3228 * The spec says we need to "wait" but it also says it should be
3231 if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
3232 PLL_POWER_STATE, PLL_POWER_STATE, 1))
3233 DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
3236 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3237 struct intel_shared_dpll *pll,
3238 i915_reg_t enable_reg)
3242 val = I915_READ(enable_reg);
3244 I915_WRITE(enable_reg, val);
3246 /* Timeout is actually 600us. */
3247 if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
3248 PLL_LOCK, PLL_LOCK, 1))
3249 DRM_ERROR("PLL %d not locked\n", pll->info->id);
3252 static void combo_pll_enable(struct drm_i915_private *dev_priv,
3253 struct intel_shared_dpll *pll)
3255 i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3257 if (IS_ELKHARTLAKE(dev_priv) &&
3258 pll->info->id == DPLL_ID_EHL_DPLL4) {
3259 enable_reg = MG_PLL_ENABLE(0);
3262 * We need to disable DC states when this DPLL is enabled.
3263 * This can be done by taking a reference on DPLL4 power
3266 pll->wakeref = intel_display_power_get(dev_priv,
3267 POWER_DOMAIN_DPLL_DC_OFF);
3270 icl_pll_power_enable(dev_priv, pll, enable_reg);
3272 icl_dpll_write(dev_priv, pll);
3275 * DVFS pre sequence would be here, but in our driver the cdclk code
3276 * paths should already be setting the appropriate voltage, hence we do
3280 icl_pll_enable(dev_priv, pll, enable_reg);
3282 /* DVFS post sequence would be here. See the comment above. */
3285 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3286 struct intel_shared_dpll *pll)
3288 icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3290 icl_dpll_write(dev_priv, pll);
3293 * DVFS pre sequence would be here, but in our driver the cdclk code
3294 * paths should already be setting the appropriate voltage, hence we do
3298 icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3300 /* DVFS post sequence would be here. See the comment above. */
3303 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3304 struct intel_shared_dpll *pll)
3306 i915_reg_t enable_reg =
3307 MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3309 icl_pll_power_enable(dev_priv, pll, enable_reg);
3311 icl_mg_pll_write(dev_priv, pll);
3314 * DVFS pre sequence would be here, but in our driver the cdclk code
3315 * paths should already be setting the appropriate voltage, hence we do
3319 icl_pll_enable(dev_priv, pll, enable_reg);
3321 /* DVFS post sequence would be here. See the comment above. */
3324 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3325 struct intel_shared_dpll *pll,
3326 i915_reg_t enable_reg)
3330 /* The first steps are done by intel_ddi_post_disable(). */
3333 * DVFS pre sequence would be here, but in our driver the cdclk code
3334 * paths should already be setting the appropriate voltage, hence we do
3338 val = I915_READ(enable_reg);
3340 I915_WRITE(enable_reg, val);
3342 /* Timeout is actually 1us. */
3343 if (intel_wait_for_register(&dev_priv->uncore,
3344 enable_reg, PLL_LOCK, 0, 1))
3345 DRM_ERROR("PLL %d locked\n", pll->info->id);
3347 /* DVFS post sequence would be here. See the comment above. */
3349 val = I915_READ(enable_reg);
3350 val &= ~PLL_POWER_ENABLE;
3351 I915_WRITE(enable_reg, val);
3354 * The spec says we need to "wait" but it also says it should be
3357 if (intel_wait_for_register(&dev_priv->uncore,
3358 enable_reg, PLL_POWER_STATE, 0, 1))
3359 DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
3362 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3363 struct intel_shared_dpll *pll)
3365 i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3367 if (IS_ELKHARTLAKE(dev_priv) &&
3368 pll->info->id == DPLL_ID_EHL_DPLL4) {
3369 enable_reg = MG_PLL_ENABLE(0);
3370 icl_pll_disable(dev_priv, pll, enable_reg);
3372 intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
3377 icl_pll_disable(dev_priv, pll, enable_reg);
3380 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3381 struct intel_shared_dpll *pll)
3383 icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3386 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3387 struct intel_shared_dpll *pll)
3389 i915_reg_t enable_reg =
3390 MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3392 icl_pll_disable(dev_priv, pll, enable_reg);
3395 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3396 const struct intel_dpll_hw_state *hw_state)
3398 DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3399 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3400 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3401 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3402 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3403 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3404 hw_state->cfgcr0, hw_state->cfgcr1,
3405 hw_state->mg_refclkin_ctl,
3406 hw_state->mg_clktop2_coreclkctl1,
3407 hw_state->mg_clktop2_hsclkctl,
3408 hw_state->mg_pll_div0,
3409 hw_state->mg_pll_div1,
3410 hw_state->mg_pll_lf,
3411 hw_state->mg_pll_frac_lock,
3412 hw_state->mg_pll_ssc,
3413 hw_state->mg_pll_bias,
3414 hw_state->mg_pll_tdc_coldst_bias);
3417 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
3418 .enable = combo_pll_enable,
3419 .disable = combo_pll_disable,
3420 .get_hw_state = combo_pll_get_hw_state,
3423 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
3424 .enable = tbt_pll_enable,
3425 .disable = tbt_pll_disable,
3426 .get_hw_state = tbt_pll_get_hw_state,
3429 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
3430 .enable = mg_pll_enable,
3431 .disable = mg_pll_disable,
3432 .get_hw_state = mg_pll_get_hw_state,
3435 static const struct dpll_info icl_plls[] = {
3436 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
3437 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
3438 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3439 { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
3440 { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
3441 { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
3442 { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
3446 static const struct intel_dpll_mgr icl_pll_mgr = {
3447 .dpll_info = icl_plls,
3448 .get_dplls = icl_get_dplls,
3449 .put_dplls = icl_put_dplls,
3450 .update_active_dpll = icl_update_active_dpll,
3451 .dump_hw_state = icl_dump_hw_state,
3454 static const struct dpll_info ehl_plls[] = {
3455 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
3456 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
3457 { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
3461 static const struct intel_dpll_mgr ehl_pll_mgr = {
3462 .dpll_info = ehl_plls,
3463 .get_dplls = icl_get_dplls,
3464 .put_dplls = icl_put_dplls,
3465 .dump_hw_state = icl_dump_hw_state,
3469 * intel_shared_dpll_init - Initialize shared DPLLs
3472 * Initialize shared DPLLs for @dev.
3474 void intel_shared_dpll_init(struct drm_device *dev)
3476 struct drm_i915_private *dev_priv = to_i915(dev);
3477 const struct intel_dpll_mgr *dpll_mgr = NULL;
3478 const struct dpll_info *dpll_info;
3481 if (IS_ELKHARTLAKE(dev_priv))
3482 dpll_mgr = &ehl_pll_mgr;
3483 else if (INTEL_GEN(dev_priv) >= 11)
3484 dpll_mgr = &icl_pll_mgr;
3485 else if (IS_CANNONLAKE(dev_priv))
3486 dpll_mgr = &cnl_pll_mgr;
3487 else if (IS_GEN9_BC(dev_priv))
3488 dpll_mgr = &skl_pll_mgr;
3489 else if (IS_GEN9_LP(dev_priv))
3490 dpll_mgr = &bxt_pll_mgr;
3491 else if (HAS_DDI(dev_priv))
3492 dpll_mgr = &hsw_pll_mgr;
3493 else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
3494 dpll_mgr = &pch_pll_mgr;
3497 dev_priv->num_shared_dpll = 0;
3501 dpll_info = dpll_mgr->dpll_info;
3503 for (i = 0; dpll_info[i].name; i++) {
3504 WARN_ON(i != dpll_info[i].id);
3505 dev_priv->shared_dplls[i].info = &dpll_info[i];
3508 dev_priv->dpll_mgr = dpll_mgr;
3509 dev_priv->num_shared_dpll = i;
3510 mutex_init(&dev_priv->dpll_lock);
3512 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
3516 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
3517 * @state: atomic state
3518 * @crtc: CRTC to reserve DPLLs for
3521 * This function reserves all required DPLLs for the given CRTC and encoder
3522 * combination in the current atomic commit @state and the new @crtc atomic
3525 * The new configuration in the atomic commit @state is made effective by
3526 * calling intel_shared_dpll_swap_state().
3528 * The reserved DPLLs should be released by calling
3529 * intel_release_shared_dplls().
3532 * True if all required DPLLs were successfully reserved.
3534 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
3535 struct intel_crtc *crtc,
3536 struct intel_encoder *encoder)
3538 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3539 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3541 if (WARN_ON(!dpll_mgr))
3544 return dpll_mgr->get_dplls(state, crtc, encoder);
3548 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
3549 * @state: atomic state
3550 * @crtc: crtc from which the DPLLs are to be released
3552 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
3553 * from the current atomic commit @state and the old @crtc atomic state.
3555 * The new configuration in the atomic commit @state is made effective by
3556 * calling intel_shared_dpll_swap_state().
3558 void intel_release_shared_dplls(struct intel_atomic_state *state,
3559 struct intel_crtc *crtc)
3561 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3562 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3565 * FIXME: this function is called for every platform having a
3566 * compute_clock hook, even though the platform doesn't yet support
3567 * the shared DPLL framework and intel_reserve_shared_dplls() is not
3573 dpll_mgr->put_dplls(state, crtc);
3577 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
3578 * @state: atomic state
3579 * @crtc: the CRTC for which to update the active DPLL
3580 * @encoder: encoder determining the type of port DPLL
3582 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
3583 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
3584 * DPLL selected will be based on the current mode of the encoder's port.
3586 void intel_update_active_dpll(struct intel_atomic_state *state,
3587 struct intel_crtc *crtc,
3588 struct intel_encoder *encoder)
3590 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3591 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3593 if (WARN_ON(!dpll_mgr))
3596 dpll_mgr->update_active_dpll(state, crtc, encoder);
 * intel_dpll_dump_hw_state - write hw_state to dmesg
3601 * @dev_priv: i915 drm device
3602 * @hw_state: hw state to be written to the log
3604 * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
3606 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
3607 const struct intel_dpll_hw_state *hw_state)
3609 if (dev_priv->dpll_mgr) {
3610 dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
3612 /* fallback for platforms that don't use the shared dpll
3615 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
3616 "fp0: 0x%x, fp1: 0x%x\n",