2 * Copyright © 2006-2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include "intel_display_types.h"
25 #include "intel_dpio_phy.h"
26 #include "intel_dpll_mgr.h"
31 * Display PLLs used for driving outputs vary by platform. While some have
32 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
33 * from a pool. In the latter scenario, it is possible that multiple pipes
34 * share a PLL if their configurations match.
36 * This file provides an abstraction over display PLLs. The function
37 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
38 * users of a PLL are tracked and that tracking is integrated with the atomic
39 * modeset interface. During an atomic operation, required PLLs can be reserved
40 * for a given CRTC and encoder configuration by calling
41 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
42 * with intel_release_shared_dplls().
43 * Changes to the users are first staged in the atomic state, and then made
44 * effective by calling intel_shared_dpll_swap_state() during the atomic
49 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
50 struct intel_shared_dpll_state *shared_dpll)
54 /* Copy shared dpll state */
55 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
56 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
58 shared_dpll[i] = pll->state;
62 static struct intel_shared_dpll_state *
63 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
65 struct intel_atomic_state *state = to_intel_atomic_state(s);
67 WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
69 if (!state->dpll_set) {
70 state->dpll_set = true;
72 intel_atomic_duplicate_dpll_state(to_i915(s->dev),
76 return state->shared_dpll;
80 * intel_get_shared_dpll_by_id - get a DPLL given its id
81 * @dev_priv: i915 device instance
85 * A pointer to the DPLL with @id
87 struct intel_shared_dpll *
88 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
89 enum intel_dpll_id id)
91 return &dev_priv->shared_dplls[id];
95 * intel_get_shared_dpll_id - get the id of a DPLL
96 * @dev_priv: i915 device instance
103 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
104 struct intel_shared_dpll *pll)
106 if (drm_WARN_ON(&dev_priv->drm, pll < dev_priv->shared_dplls ||
107 pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
110 return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
114 void assert_shared_dpll(struct drm_i915_private *dev_priv,
115 struct intel_shared_dpll *pll,
119 struct intel_dpll_hw_state hw_state;
121 if (drm_WARN(&dev_priv->drm, !pll,
122 "asserting DPLL %s with no DPLL\n", onoff(state)))
125 cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
126 I915_STATE_WARN(cur_state != state,
127 "%s assertion failure (expected %s, current %s)\n",
128 pll->info->name, onoff(state), onoff(cur_state));
132 * intel_prepare_shared_dpll - call a dpll's prepare hook
133 * @crtc_state: CRTC, and its state, which has a shared dpll
135 * This calls the PLL's prepare hook if it has one and if the PLL is not
136 * already enabled. The prepare hook is platform specific.
138 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
140 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
141 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
142 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
144 if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
147 mutex_lock(&dev_priv->dpll_lock);
148 drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
149 if (!pll->active_mask) {
150 drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
151 drm_WARN_ON(&dev_priv->drm, pll->on);
152 assert_shared_dpll_disabled(dev_priv, pll);
154 pll->info->funcs->prepare(dev_priv, pll);
156 mutex_unlock(&dev_priv->dpll_lock);
160 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
161 * @crtc_state: CRTC, and its state, which has a shared DPLL
163 * Enable the shared DPLL used by @crtc.
165 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
167 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
168 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
169 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
170 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
171 unsigned int old_mask;
173 if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
176 mutex_lock(&dev_priv->dpll_lock);
177 old_mask = pll->active_mask;
179 if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
180 drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask))
183 pll->active_mask |= crtc_mask;
185 drm_dbg_kms(&dev_priv->drm,
186 "enable %s (active %x, on? %d) for crtc %d\n",
187 pll->info->name, pll->active_mask, pll->on,
191 drm_WARN_ON(&dev_priv->drm, !pll->on);
192 assert_shared_dpll_enabled(dev_priv, pll);
195 drm_WARN_ON(&dev_priv->drm, pll->on);
197 drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
198 pll->info->funcs->enable(dev_priv, pll);
202 mutex_unlock(&dev_priv->dpll_lock);
206 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
207 * @crtc_state: CRTC, and its state, which has a shared DPLL
209 * Disable the shared DPLL used by @crtc.
211 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
213 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
214 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
215 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
216 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
218 /* PCH only available on ILK+ */
219 if (INTEL_GEN(dev_priv) < 5)
225 mutex_lock(&dev_priv->dpll_lock);
226 if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
229 drm_dbg_kms(&dev_priv->drm,
230 "disable %s (active %x, on? %d) for crtc %d\n",
231 pll->info->name, pll->active_mask, pll->on,
234 assert_shared_dpll_enabled(dev_priv, pll);
235 drm_WARN_ON(&dev_priv->drm, !pll->on);
237 pll->active_mask &= ~crtc_mask;
238 if (pll->active_mask)
241 drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
242 pll->info->funcs->disable(dev_priv, pll);
246 mutex_unlock(&dev_priv->dpll_lock);
249 static struct intel_shared_dpll *
250 intel_find_shared_dpll(struct intel_atomic_state *state,
251 const struct intel_crtc *crtc,
252 const struct intel_dpll_hw_state *pll_state,
253 unsigned long dpll_mask)
255 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
256 struct intel_shared_dpll *pll, *unused_pll = NULL;
257 struct intel_shared_dpll_state *shared_dpll;
258 enum intel_dpll_id i;
260 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
262 drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
264 for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
265 pll = &dev_priv->shared_dplls[i];
267 /* Only want to check enabled timings first */
268 if (shared_dpll[i].crtc_mask == 0) {
274 if (memcmp(pll_state,
275 &shared_dpll[i].hw_state,
276 sizeof(*pll_state)) == 0) {
277 drm_dbg_kms(&dev_priv->drm,
278 "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
279 crtc->base.base.id, crtc->base.name,
281 shared_dpll[i].crtc_mask,
287 /* Ok no matching timings, maybe there's a free one? */
289 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
290 crtc->base.base.id, crtc->base.name,
291 unused_pll->info->name);
299 intel_reference_shared_dpll(struct intel_atomic_state *state,
300 const struct intel_crtc *crtc,
301 const struct intel_shared_dpll *pll,
302 const struct intel_dpll_hw_state *pll_state)
304 struct drm_i915_private *i915 = to_i915(state->base.dev);
305 struct intel_shared_dpll_state *shared_dpll;
306 const enum intel_dpll_id id = pll->info->id;
308 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
310 if (shared_dpll[id].crtc_mask == 0)
311 shared_dpll[id].hw_state = *pll_state;
313 drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
314 pipe_name(crtc->pipe));
316 shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
319 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
320 const struct intel_crtc *crtc,
321 const struct intel_shared_dpll *pll)
323 struct intel_shared_dpll_state *shared_dpll;
325 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
326 shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
329 static void intel_put_dpll(struct intel_atomic_state *state,
330 struct intel_crtc *crtc)
332 const struct intel_crtc_state *old_crtc_state =
333 intel_atomic_get_old_crtc_state(state, crtc);
334 struct intel_crtc_state *new_crtc_state =
335 intel_atomic_get_new_crtc_state(state, crtc);
337 new_crtc_state->shared_dpll = NULL;
339 if (!old_crtc_state->shared_dpll)
342 intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
346 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
347 * @state: atomic state
349 * This is the dpll version of drm_atomic_helper_swap_state() since the
350 * helper does not handle driver-specific global state.
352 * For consistency with atomic helpers this function does a complete swap,
353 * i.e. it also puts the current state into @state, even though there is no
354 * need for that at this moment.
356 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
358 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
359 struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
360 enum intel_dpll_id i;
362 if (!state->dpll_set)
365 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
366 struct intel_shared_dpll *pll =
367 &dev_priv->shared_dplls[i];
369 swap(pll->state, shared_dpll[i]);
373 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
374 struct intel_shared_dpll *pll,
375 struct intel_dpll_hw_state *hw_state)
377 const enum intel_dpll_id id = pll->info->id;
378 intel_wakeref_t wakeref;
381 wakeref = intel_display_power_get_if_enabled(dev_priv,
382 POWER_DOMAIN_DISPLAY_CORE);
386 val = intel_de_read(dev_priv, PCH_DPLL(id));
387 hw_state->dpll = val;
388 hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
389 hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
391 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
393 return val & DPLL_VCO_ENABLE;
396 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
397 struct intel_shared_dpll *pll)
399 const enum intel_dpll_id id = pll->info->id;
401 intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
402 intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
405 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
410 I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
412 val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
413 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
414 DREF_SUPERSPREAD_SOURCE_MASK));
415 I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
418 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
419 struct intel_shared_dpll *pll)
421 const enum intel_dpll_id id = pll->info->id;
423 /* PCH refclock must be enabled first */
424 ibx_assert_pch_refclk_enabled(dev_priv);
426 intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
428 /* Wait for the clocks to stabilize. */
429 intel_de_posting_read(dev_priv, PCH_DPLL(id));
432 /* The pixel multiplier can only be updated once the
433 * DPLL is enabled and the clocks are stable.
437 intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
438 intel_de_posting_read(dev_priv, PCH_DPLL(id));
442 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
443 struct intel_shared_dpll *pll)
445 const enum intel_dpll_id id = pll->info->id;
447 intel_de_write(dev_priv, PCH_DPLL(id), 0);
448 intel_de_posting_read(dev_priv, PCH_DPLL(id));
452 static bool ibx_get_dpll(struct intel_atomic_state *state,
453 struct intel_crtc *crtc,
454 struct intel_encoder *encoder)
456 struct intel_crtc_state *crtc_state =
457 intel_atomic_get_new_crtc_state(state, crtc);
458 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
459 struct intel_shared_dpll *pll;
460 enum intel_dpll_id i;
462 if (HAS_PCH_IBX(dev_priv)) {
463 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
464 i = (enum intel_dpll_id) crtc->pipe;
465 pll = &dev_priv->shared_dplls[i];
467 drm_dbg_kms(&dev_priv->drm,
468 "[CRTC:%d:%s] using pre-allocated %s\n",
469 crtc->base.base.id, crtc->base.name,
472 pll = intel_find_shared_dpll(state, crtc,
473 &crtc_state->dpll_hw_state,
474 BIT(DPLL_ID_PCH_PLL_B) |
475 BIT(DPLL_ID_PCH_PLL_A));
481 /* reference the pll */
482 intel_reference_shared_dpll(state, crtc,
483 pll, &crtc_state->dpll_hw_state);
485 crtc_state->shared_dpll = pll;
490 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
491 const struct intel_dpll_hw_state *hw_state)
493 drm_dbg_kms(&dev_priv->drm,
494 "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
495 "fp0: 0x%x, fp1: 0x%x\n",
502 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
503 .prepare = ibx_pch_dpll_prepare,
504 .enable = ibx_pch_dpll_enable,
505 .disable = ibx_pch_dpll_disable,
506 .get_hw_state = ibx_pch_dpll_get_hw_state,
509 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
510 struct intel_shared_dpll *pll)
512 const enum intel_dpll_id id = pll->info->id;
514 intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
515 intel_de_posting_read(dev_priv, WRPLL_CTL(id));
519 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
520 struct intel_shared_dpll *pll)
522 intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
523 intel_de_posting_read(dev_priv, SPLL_CTL);
527 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
528 struct intel_shared_dpll *pll)
530 const enum intel_dpll_id id = pll->info->id;
533 val = intel_de_read(dev_priv, WRPLL_CTL(id));
534 intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
535 intel_de_posting_read(dev_priv, WRPLL_CTL(id));
538 * Try to set up the PCH reference clock once all DPLLs
539 * that depend on it have been shut down.
541 if (dev_priv->pch_ssc_use & BIT(id))
542 intel_init_pch_refclk(dev_priv);
545 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
546 struct intel_shared_dpll *pll)
548 enum intel_dpll_id id = pll->info->id;
551 val = intel_de_read(dev_priv, SPLL_CTL);
552 intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
553 intel_de_posting_read(dev_priv, SPLL_CTL);
556 * Try to set up the PCH reference clock once all DPLLs
557 * that depend on it have been shut down.
559 if (dev_priv->pch_ssc_use & BIT(id))
560 intel_init_pch_refclk(dev_priv);
563 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
564 struct intel_shared_dpll *pll,
565 struct intel_dpll_hw_state *hw_state)
567 const enum intel_dpll_id id = pll->info->id;
568 intel_wakeref_t wakeref;
571 wakeref = intel_display_power_get_if_enabled(dev_priv,
572 POWER_DOMAIN_DISPLAY_CORE);
576 val = intel_de_read(dev_priv, WRPLL_CTL(id));
577 hw_state->wrpll = val;
579 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
581 return val & WRPLL_PLL_ENABLE;
584 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
585 struct intel_shared_dpll *pll,
586 struct intel_dpll_hw_state *hw_state)
588 intel_wakeref_t wakeref;
591 wakeref = intel_display_power_get_if_enabled(dev_priv,
592 POWER_DOMAIN_DISPLAY_CORE);
596 val = intel_de_read(dev_priv, SPLL_CTL);
597 hw_state->spll = val;
599 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
601 return val & SPLL_PLL_ENABLE;
605 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
611 /* Constraints for PLL good behavior */
617 struct hsw_wrpll_rnp {
621 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
695 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
696 unsigned int r2, unsigned int n2,
698 struct hsw_wrpll_rnp *best)
700 u64 a, b, c, d, diff, diff_best;
702 /* No best (r,n,p) yet */
711 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
715 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
718 * and we would like delta <= budget.
720 * If the discrepancy is above the PPM-based budget, always prefer to
721 * improve upon the previous solution. However, if you're within the
722 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
724 a = freq2k * budget * p * r2;
725 b = freq2k * budget * best->p * best->r2;
726 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
727 diff_best = abs_diff(freq2k * best->p * best->r2,
728 LC_FREQ_2K * best->n2);
730 d = 1000000 * diff_best;
732 if (a < c && b < d) {
733 /* If both are above the budget, pick the closer */
734 if (best->p * best->r2 * diff < p * r2 * diff_best) {
739 } else if (a >= c && b < d) {
740 /* If A is below the threshold but B is above it? Update. */
744 } else if (a >= c && b >= d) {
745 /* Both are below the limit, so pick the higher n2/(r2*r2) */
746 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
752 /* Otherwise a < c && b >= d, do nothing */
756 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
757 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
761 struct hsw_wrpll_rnp best = { 0, 0, 0 };
764 freq2k = clock / 100;
766 budget = hsw_wrpll_get_budget_for_freq(clock);
768 /* Special case handling for 540 pixel clock: bypass WR PLL entirely
769 * and directly pass the LC PLL to it. */
770 if (freq2k == 5400000) {
778 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
781 * We want R so that REF_MIN <= Ref <= REF_MAX.
782 * Injecting R2 = 2 * R gives:
783 * REF_MAX * r2 > LC_FREQ * 2 and
784 * REF_MIN * r2 < LC_FREQ * 2
786 * Which means the desired boundaries for r2 are:
787 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
790 for (r2 = LC_FREQ * 2 / REF_MAX + 1;
791 r2 <= LC_FREQ * 2 / REF_MIN;
795 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
797 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
798 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
799 * VCO_MAX * r2 > n2 * LC_FREQ and
800 * VCO_MIN * r2 < n2 * LC_FREQ)
802 * Which means the desired boundaries for n2 are:
803 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
805 for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
806 n2 <= VCO_MAX * r2 / LC_FREQ;
809 for (p = P_MIN; p <= P_MAX; p += P_INC)
810 hsw_wrpll_update_rnp(freq2k, budget,
820 static struct intel_shared_dpll *
821 hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
822 struct intel_crtc *crtc)
824 struct intel_crtc_state *crtc_state =
825 intel_atomic_get_new_crtc_state(state, crtc);
826 struct intel_shared_dpll *pll;
828 unsigned int p, n2, r2;
830 hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
832 val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
833 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
834 WRPLL_DIVIDER_POST(p);
836 crtc_state->dpll_hw_state.wrpll = val;
838 pll = intel_find_shared_dpll(state, crtc,
839 &crtc_state->dpll_hw_state,
840 BIT(DPLL_ID_WRPLL2) |
841 BIT(DPLL_ID_WRPLL1));
849 static struct intel_shared_dpll *
850 hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
852 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
853 struct intel_shared_dpll *pll;
854 enum intel_dpll_id pll_id;
855 int clock = crtc_state->port_clock;
859 pll_id = DPLL_ID_LCPLL_810;
862 pll_id = DPLL_ID_LCPLL_1350;
865 pll_id = DPLL_ID_LCPLL_2700;
868 drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
873 pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
881 static bool hsw_get_dpll(struct intel_atomic_state *state,
882 struct intel_crtc *crtc,
883 struct intel_encoder *encoder)
885 struct intel_crtc_state *crtc_state =
886 intel_atomic_get_new_crtc_state(state, crtc);
887 struct intel_shared_dpll *pll;
889 memset(&crtc_state->dpll_hw_state, 0,
890 sizeof(crtc_state->dpll_hw_state));
892 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
893 pll = hsw_ddi_hdmi_get_dpll(state, crtc);
894 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
895 pll = hsw_ddi_dp_get_dpll(crtc_state);
896 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
897 if (WARN_ON(crtc_state->port_clock / 2 != 135000))
900 crtc_state->dpll_hw_state.spll =
901 SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
903 pll = intel_find_shared_dpll(state, crtc,
904 &crtc_state->dpll_hw_state,
913 intel_reference_shared_dpll(state, crtc,
914 pll, &crtc_state->dpll_hw_state);
916 crtc_state->shared_dpll = pll;
921 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
922 const struct intel_dpll_hw_state *hw_state)
924 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
925 hw_state->wrpll, hw_state->spll);
928 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
929 .enable = hsw_ddi_wrpll_enable,
930 .disable = hsw_ddi_wrpll_disable,
931 .get_hw_state = hsw_ddi_wrpll_get_hw_state,
934 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
935 .enable = hsw_ddi_spll_enable,
936 .disable = hsw_ddi_spll_disable,
937 .get_hw_state = hsw_ddi_spll_get_hw_state,
940 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
941 struct intel_shared_dpll *pll)
945 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
946 struct intel_shared_dpll *pll)
950 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
951 struct intel_shared_dpll *pll,
952 struct intel_dpll_hw_state *hw_state)
957 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
958 .enable = hsw_ddi_lcpll_enable,
959 .disable = hsw_ddi_lcpll_disable,
960 .get_hw_state = hsw_ddi_lcpll_get_hw_state,
963 struct skl_dpll_regs {
964 i915_reg_t ctl, cfgcr1, cfgcr2;
967 /* this array is indexed by the *shared* pll id */
968 static const struct skl_dpll_regs skl_dpll_regs[4] = {
972 /* DPLL 0 doesn't support HDMI mode */
977 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
978 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
983 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
984 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
989 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
990 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
994 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
995 struct intel_shared_dpll *pll)
997 const enum intel_dpll_id id = pll->info->id;
1000 val = intel_de_read(dev_priv, DPLL_CTRL1);
1002 val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
1003 DPLL_CTRL1_SSC(id) |
1004 DPLL_CTRL1_LINK_RATE_MASK(id));
1005 val |= pll->state.hw_state.ctrl1 << (id * 6);
1007 intel_de_write(dev_priv, DPLL_CTRL1, val);
1008 intel_de_posting_read(dev_priv, DPLL_CTRL1);
1011 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1012 struct intel_shared_dpll *pll)
1014 const struct skl_dpll_regs *regs = skl_dpll_regs;
1015 const enum intel_dpll_id id = pll->info->id;
1017 skl_ddi_pll_write_ctrl1(dev_priv, pll);
1019 intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1020 intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1021 intel_de_posting_read(dev_priv, regs[id].cfgcr1);
1022 intel_de_posting_read(dev_priv, regs[id].cfgcr2);
1024 /* the enable bit is always bit 31 */
1025 intel_de_write(dev_priv, regs[id].ctl,
1026 intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);
1028 if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
1029 drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
1032 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
1033 struct intel_shared_dpll *pll)
1035 skl_ddi_pll_write_ctrl1(dev_priv, pll);
1038 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1039 struct intel_shared_dpll *pll)
1041 const struct skl_dpll_regs *regs = skl_dpll_regs;
1042 const enum intel_dpll_id id = pll->info->id;
1044 /* the enable bit is always bit 31 */
1045 intel_de_write(dev_priv, regs[id].ctl,
1046 intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
1047 intel_de_posting_read(dev_priv, regs[id].ctl);
1050 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
1051 struct intel_shared_dpll *pll)
1055 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1056 struct intel_shared_dpll *pll,
1057 struct intel_dpll_hw_state *hw_state)
1060 const struct skl_dpll_regs *regs = skl_dpll_regs;
1061 const enum intel_dpll_id id = pll->info->id;
1062 intel_wakeref_t wakeref;
1065 wakeref = intel_display_power_get_if_enabled(dev_priv,
1066 POWER_DOMAIN_DISPLAY_CORE);
1072 val = intel_de_read(dev_priv, regs[id].ctl);
1073 if (!(val & LCPLL_PLL_ENABLE))
1076 val = intel_de_read(dev_priv, DPLL_CTRL1);
1077 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1079 /* avoid reading back stale values if HDMI mode is not enabled */
1080 if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1081 hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
1082 hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
1087 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1092 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1093 struct intel_shared_dpll *pll,
1094 struct intel_dpll_hw_state *hw_state)
1096 const struct skl_dpll_regs *regs = skl_dpll_regs;
1097 const enum intel_dpll_id id = pll->info->id;
1098 intel_wakeref_t wakeref;
1102 wakeref = intel_display_power_get_if_enabled(dev_priv,
1103 POWER_DOMAIN_DISPLAY_CORE);
1109 /* DPLL0 is always enabled since it drives CDCLK */
1110 val = intel_de_read(dev_priv, regs[id].ctl);
1111 if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
1114 val = intel_de_read(dev_priv, DPLL_CTRL1);
1115 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1120 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1125 struct skl_wrpll_context {
1126 u64 min_deviation; /* current minimal deviation */
1127 u64 central_freq; /* chosen central freq */
1128 u64 dco_freq; /* chosen dco freq */
1129 unsigned int p; /* chosen divider */
1132 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1134 memset(ctx, 0, sizeof(*ctx));
1136 ctx->min_deviation = U64_MAX;
1139 /* DCO freq must be within +1%/-6% of the DCO central freq */
1140 #define SKL_DCO_MAX_PDEVIATION 100
1141 #define SKL_DCO_MAX_NDEVIATION 600
1143 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1146 unsigned int divider)
1150 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1153 /* positive deviation */
1154 if (dco_freq >= central_freq) {
1155 if (deviation < SKL_DCO_MAX_PDEVIATION &&
1156 deviation < ctx->min_deviation) {
1157 ctx->min_deviation = deviation;
1158 ctx->central_freq = central_freq;
1159 ctx->dco_freq = dco_freq;
1162 /* negative deviation */
1163 } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1164 deviation < ctx->min_deviation) {
1165 ctx->min_deviation = deviation;
1166 ctx->central_freq = central_freq;
1167 ctx->dco_freq = dco_freq;
1172 static void skl_wrpll_get_multipliers(unsigned int p,
1173 unsigned int *p0 /* out */,
1174 unsigned int *p1 /* out */,
1175 unsigned int *p2 /* out */)
1179 unsigned int half = p / 2;
1181 if (half == 1 || half == 2 || half == 3 || half == 5) {
1185 } else if (half % 2 == 0) {
1189 } else if (half % 3 == 0) {
1193 } else if (half % 7 == 0) {
1198 } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
1202 } else if (p == 5 || p == 7) {
1206 } else if (p == 15) {
1210 } else if (p == 21) {
1214 } else if (p == 35) {
1221 struct skl_wrpll_params {
1231 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1234 u32 p0, u32 p1, u32 p2)
1238 switch (central_freq) {
1240 params->central_freq = 0;
1243 params->central_freq = 1;
1246 params->central_freq = 3;
1263 WARN(1, "Incorrect PDiv\n");
1280 WARN(1, "Incorrect KDiv\n");
1283 params->qdiv_ratio = p1;
1284 params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1286 dco_freq = p0 * p1 * p2 * afe_clock;
1289 * Intermediate values are in Hz.
1290 * Divide by MHz to match bsepc
1292 params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
1293 params->dco_fraction =
1294 div_u64((div_u64(dco_freq, 24) -
1295 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1299 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1300 struct skl_wrpll_params *wrpll_params)
1302 u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1303 u64 dco_central_freq[3] = { 8400000000ULL,
1306 static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
1307 24, 28, 30, 32, 36, 40, 42, 44,
1308 48, 52, 54, 56, 60, 64, 66, 68,
1309 70, 72, 76, 78, 80, 84, 88, 90,
1311 static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1312 static const struct {
1316 { even_dividers, ARRAY_SIZE(even_dividers) },
1317 { odd_dividers, ARRAY_SIZE(odd_dividers) },
1319 struct skl_wrpll_context ctx;
1320 unsigned int dco, d, i;
1321 unsigned int p0, p1, p2;
1323 skl_wrpll_context_init(&ctx);
1325 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1326 for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1327 for (i = 0; i < dividers[d].n_dividers; i++) {
1328 unsigned int p = dividers[d].list[i];
1329 u64 dco_freq = p * afe_clock;
1331 skl_wrpll_try_divider(&ctx,
1332 dco_central_freq[dco],
1336 * Skip the remaining dividers if we're sure to
1337 * have found the definitive divider, we can't
1338 * improve a 0 deviation.
1340 if (ctx.min_deviation == 0)
1341 goto skip_remaining_dividers;
1345 skip_remaining_dividers:
1347 * If a solution is found with an even divider, prefer
1350 if (d == 0 && ctx.p)
1355 DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1360 * gcc incorrectly analyses that these can be used without being
1361 * initialized. To be fair, it's hard to guess.
1364 skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1365 skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
1371 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1373 u32 ctrl1, cfgcr1, cfgcr2;
1374 struct skl_wrpll_params wrpll_params = { 0, };
1377 * See comment in intel_dpll_hw_state to understand why we always use 0
1378 * as the DPLL id in this function.
1380 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1382 ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1384 if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1388 cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1389 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1390 wrpll_params.dco_integer;
1392 cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1393 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1394 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1395 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1396 wrpll_params.central_freq;
1398 memset(&crtc_state->dpll_hw_state, 0,
1399 sizeof(crtc_state->dpll_hw_state));
1401 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1402 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1403 crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
/*
 * Program crtc_state->dpll_hw_state.ctrl1 with the fixed DP link rate
 * selected from port_clock. The switch maps half the port clock (i.e. the
 * symbol clock in units of 100kHz x2) onto the DPLL_CTRL1 link-rate codes.
 * NOTE(review): the 'static bool' line, switch cases and return are missing
 * from this listing.
 */
1408 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1413 * See comment in intel_dpll_hw_state to understand why we always use 0
1414 * as the DPLL id in this function.
1416 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1417 switch (crtc_state->port_clock / 2) {
1419 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1422 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1425 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1429 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1432 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1435 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1439 memset(&crtc_state->dpll_hw_state, 0,
1440 sizeof(crtc_state->dpll_hw_state));
1442 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
/*
 * SKL .get_dplls hook: compute the dpll_hw_state for the output type
 * (HDMI dividers or fixed DP link rate), then pick a shared DPLL.
 * eDP is constrained to DPLL0; everything else may use DPLL1-3.
 * NOTE(review): error-path braces/returns are missing from this listing.
 */
1447 static bool skl_get_dpll(struct intel_atomic_state *state,
1448 struct intel_crtc *crtc,
1449 struct intel_encoder *encoder)
1451 struct intel_crtc_state *crtc_state =
1452 intel_atomic_get_new_crtc_state(state, crtc);
1453 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1454 struct intel_shared_dpll *pll;
1457 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1458 bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1460 drm_dbg_kms(&i915->drm,
1461 "Could not get HDMI pll dividers.\n");
1464 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
1465 bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1467 drm_dbg_kms(&i915->drm,
1468 "Could not set DP dpll HW state.\n");
/* eDP is tied to DPLL0; other outputs choose among DPLL1..3. */
1475 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1476 pll = intel_find_shared_dpll(state, crtc,
1477 &crtc_state->dpll_hw_state,
1478 BIT(DPLL_ID_SKL_DPLL0));
1480 pll = intel_find_shared_dpll(state, crtc,
1481 &crtc_state->dpll_hw_state,
1482 BIT(DPLL_ID_SKL_DPLL3) |
1483 BIT(DPLL_ID_SKL_DPLL2) |
1484 BIT(DPLL_ID_SKL_DPLL1));
/* Record this CRTC as a user of the chosen PLL in the atomic state. */
1488 intel_reference_shared_dpll(state, crtc,
1489 pll, &crtc_state->dpll_hw_state);
1491 crtc_state->shared_dpll = pll;
/* Debug helper: print the SKL ctrl1/cfgcr1/cfgcr2 hw state values. */
1496 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1497 const struct intel_dpll_hw_state *hw_state)
1499 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
1500 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
/* Ops for the shareable SKL DPLLs 1-3. */
1506 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1507 .enable = skl_ddi_pll_enable,
1508 .disable = skl_ddi_pll_disable,
1509 .get_hw_state = skl_ddi_pll_get_hw_state,
/* Ops for DPLL0, which also drives CDCLK and is handled separately. */
1512 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1513 .enable = skl_ddi_dpll0_enable,
1514 .disable = skl_ddi_dpll0_disable,
1515 .get_hw_state = skl_ddi_dpll0_get_hw_state,
/*
 * BXT/GLK port PLL enable sequence: program the precomputed divider,
 * coefficient and calibration values from pll->state.hw_state into the
 * per-PHY/channel PLL registers, then enable the PLL and wait for lock.
 * The exact register ordering follows the hardware programming sequence;
 * do not reorder the writes.
 */
1518 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1519 struct intel_shared_dpll *pll)
1522 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1524 enum dpio_channel ch;
1526 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1528 /* Non-SSC reference */
1529 temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1530 temp |= PORT_PLL_REF_SEL;
1531 intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
/* GLK additionally requires PLL power-up before programming. */
1533 if (IS_GEMINILAKE(dev_priv)) {
1534 temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1535 temp |= PORT_PLL_POWER_ENABLE;
1536 intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1538 if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
1539 PORT_PLL_POWER_STATE), 200))
1540 drm_err(&dev_priv->drm,
1541 "Power state not set for PLL:%d\n", port);
1544 /* Disable 10 bit clock */
1545 temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1546 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1547 intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Write P1/P2 dividers. */
1550 temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
1551 temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1552 temp |= pll->state.hw_state.ebb0;
1553 intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);
1555 /* Write M2 integer */
1556 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
1557 temp &= ~PORT_PLL_M2_MASK;
1558 temp |= pll->state.hw_state.pll0;
1559 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
/* Write N divider. */
1562 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
1563 temp &= ~PORT_PLL_N_MASK;
1564 temp |= pll->state.hw_state.pll1;
1565 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);
1567 /* Write M2 fraction */
1568 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
1569 temp &= ~PORT_PLL_M2_FRAC_MASK;
1570 temp |= pll->state.hw_state.pll2;
1571 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);
1573 /* Write M2 fraction enable */
1574 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
1575 temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1576 temp |= pll->state.hw_state.pll3;
1577 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);
/* Write loop-filter proportional/integral coefficients and gain. */
1580 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
1581 temp &= ~PORT_PLL_PROP_COEFF_MASK;
1582 temp &= ~PORT_PLL_INT_COEFF_MASK;
1583 temp &= ~PORT_PLL_GAIN_CTL_MASK;
1584 temp |= pll->state.hw_state.pll6;
1585 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
1587 /* Write calibration val */
1588 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
1589 temp &= ~PORT_PLL_TARGET_CNT_MASK;
1590 temp |= pll->state.hw_state.pll8;
1591 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);
/* Lock threshold. */
1593 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
1594 temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1595 temp |= pll->state.hw_state.pll9;
1596 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);
/* DCO amplitude override/value. */
1598 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
1599 temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1600 temp &= ~PORT_PLL_DCO_AMP_MASK;
1601 temp |= pll->state.hw_state.pll10;
1602 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);
1604 /* Recalibrate with new settings */
1605 temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1606 temp |= PORT_PLL_RECALIBRATE;
1607 intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1608 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1609 temp |= pll->state.hw_state.ebb4;
1610 intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable the PLL and wait (posted read flushes the write) for lock. */
1613 temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1614 temp |= PORT_PLL_ENABLE;
1615 intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1616 intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1618 if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1620 drm_err(&dev_priv->drm, "PLL %d not locked\n", port);
/* GLK: apply DCC delay range on all lanes via the group register. */
1622 if (IS_GEMINILAKE(dev_priv)) {
1623 temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
1624 temp |= DCC_DELAY_RANGE_2;
1625 intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
1629 * While we write to the group register to program all lanes at once we
1630 * can read only lane registers and we pick lanes 0/1 for that.
1632 temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
1633 temp &= ~LANE_STAGGER_MASK;
1634 temp &= ~LANESTAGGER_STRAP_OVRD;
1635 temp |= pll->state.hw_state.pcsdw12;
1636 intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
/*
 * BXT/GLK port PLL disable: clear the enable bit, flush with a posting
 * read, and on GLK also power the PLL down and wait for the power state
 * to clear.
 */
1639 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
1640 struct intel_shared_dpll *pll)
1642 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1645 temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1646 temp &= ~PORT_PLL_ENABLE;
1647 intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1648 intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1650 if (IS_GEMINILAKE(dev_priv)) {
1651 temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1652 temp &= ~PORT_PLL_POWER_ENABLE;
1653 intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1655 if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
1656 PORT_PLL_POWER_STATE), 200))
1657 drm_err(&dev_priv->drm,
1658 "Power state not reset for PLL:%d\n", port);
/*
 * Read back the current BXT port PLL configuration from hardware into
 * *hw_state, masking each field to the bits this driver programs so the
 * result can be compared against the software state. Requires the display
 * core power domain; bails out (with the enabled=false path, not visible
 * here) if the power well or the PLL itself is off.
 */
1662 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1663 struct intel_shared_dpll *pll,
1664 struct intel_dpll_hw_state *hw_state)
1666 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1667 intel_wakeref_t wakeref;
1669 enum dpio_channel ch;
1673 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
/* Only proceed if the display core domain is already powered. */
1675 wakeref = intel_display_power_get_if_enabled(dev_priv,
1676 POWER_DOMAIN_DISPLAY_CORE);
1682 val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1683 if (!(val & PORT_PLL_ENABLE))
1686 hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
1687 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
1689 hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1690 hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
1692 hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
1693 hw_state->pll0 &= PORT_PLL_M2_MASK;
1695 hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
1696 hw_state->pll1 &= PORT_PLL_N_MASK;
1698 hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
1699 hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
1701 hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
1702 hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
1704 hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
1705 hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
1706 PORT_PLL_INT_COEFF_MASK |
1707 PORT_PLL_GAIN_CTL_MASK;
1709 hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
1710 hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
1712 hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
1713 hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
1715 hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
1716 hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
1717 PORT_PLL_DCO_AMP_MASK;
1720 * While we write to the group register to program all lanes at once we
1721 * can read only lane registers. We configure all lanes the same way, so
1722 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
1724 hw_state->pcsdw12 = intel_de_read(dev_priv,
1725 BXT_PORT_PCS_DW12_LN01(phy, ch));
1726 if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
1727 drm_dbg(&dev_priv->drm,
1728 "lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
1730 intel_de_read(dev_priv,
1731 BXT_PORT_PCS_DW12_LN23(phy, ch)));
1732 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
/* Release the power reference taken above. */
1737 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1742 /* bxt clock parameters */
/* Divider set for one BXT PLL frequency (fields not visible in this listing). */
1743 struct bxt_clk_div {
1755 /* pre-calculated values for DP linkrates */
/*
 * One entry per supported DP link rate (kHz). Entry order: the standard
 * rates 1.62/2.7/5.4 GHz first, then the eDP intermediate rates.
 */
1756 static const struct bxt_clk_div bxt_dp_clk_val[] = {
1757 {162000, 4, 2, 32, 1677722, 1, 1},
1758 {270000, 4, 1, 27, 0, 0, 1},
1759 {540000, 2, 1, 27, 0, 0, 1},
1760 {216000, 3, 2, 32, 1677722, 1, 1},
1761 {243000, 4, 1, 24, 1258291, 1, 1},
1762 {324000, 4, 1, 32, 1677722, 1, 1},
1763 {432000, 3, 1, 32, 1677722, 1, 1}
/*
 * Find HDMI PLL dividers for BXT via the generic bxt_find_best_dpll()
 * search and unpack the result into *clk_div. m2 is stored as a 22.22
 * style fixed point value: integer part above bit 22, fraction below.
 */
1767 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
1768 struct bxt_clk_div *clk_div)
1770 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1771 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1772 struct dpll best_clock;
1774 /* Calculate HDMI div */
1776 * FIXME: tie the following calculation into
1777 * i9xx_crtc_compute_clock
1779 if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
1780 drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
1781 crtc_state->port_clock,
1782 pipe_name(crtc->pipe));
1786 clk_div->p1 = best_clock.p1;
1787 clk_div->p2 = best_clock.p2;
/* The BXT PLL only supports m1 == 2. */
1788 WARN_ON(best_clock.m1 != 2);
1789 clk_div->n = best_clock.n;
/* Split m2 into integer and 22-bit fractional parts. */
1790 clk_div->m2_int = best_clock.m2 >> 22;
1791 clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
1792 clk_div->m2_frac_en = clk_div->m2_frac != 0;
1794 clk_div->vco = best_clock.vco;
/*
 * Select the precomputed divider entry matching the DP link rate; the
 * first table entry is the fallback if no exact match exists. The VCO is
 * then derived from the clock and the p1/p2 dividers.
 */
1799 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
1800 struct bxt_clk_div *clk_div)
1802 int clock = crtc_state->port_clock;
1805 *clk_div = bxt_dp_clk_val[0];
1806 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
1807 if (bxt_dp_clk_val[i].clock == clock) {
1808 *clk_div = bxt_dp_clk_val[i];
/* vco = clock * 10 / 2 * p1 * p2 (link clock in kHz). */
1813 clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
/*
 * Translate the divider selection in *clk_div into the BXT PLL register
 * values stored in crtc_state->dpll_hw_state. The loop-filter
 * coefficients and target count are chosen from the VCO range, the lane
 * stagger value from the link clock.
 * NOTE(review): the assignments inside the VCO-range branches and the
 * lanestagger values are missing from this listing.
 */
1816 static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
1817 const struct bxt_clk_div *clk_div)
1819 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1820 struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
1821 int clock = crtc_state->port_clock;
1822 int vco = clk_div->vco;
1823 u32 prop_coef, int_coef, gain_ctl, targ_cnt;
1826 memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
/* Pick loop-filter coefficients based on the VCO frequency range. */
1828 if (vco >= 6200000 && vco <= 6700000) {
1833 } else if ((vco > 5400000 && vco < 6200000) ||
1834 (vco >= 4800000 && vco < 5400000)) {
1839 } else if (vco == 5400000) {
1845 drm_err(&i915->drm, "Invalid VCO\n");
/* Pick the lane stagger value from the link clock range. */
1851 else if (clock > 135000)
1853 else if (clock > 67000)
1855 else if (clock > 33000)
1860 dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
1861 dpll_hw_state->pll0 = clk_div->m2_int;
1862 dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
1863 dpll_hw_state->pll2 = clk_div->m2_frac;
1865 if (clk_div->m2_frac_en)
1866 dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
1868 dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
1869 dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
1871 dpll_hw_state->pll8 = targ_cnt;
1873 dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
1875 dpll_hw_state->pll10 =
1876 PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
1877 | PORT_PLL_DCO_AMP_OVR_EN_H;
1879 dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
1881 dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
/* DP wrapper: compute DP dividers, then fill the PLL hw state. */
1887 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1889 struct bxt_clk_div clk_div = {};
1891 bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
1893 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
/* HDMI wrapper: compute HDMI dividers, then fill the PLL hw state. */
1897 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1899 struct bxt_clk_div clk_div = {};
1901 bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
1903 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
/*
 * BXT .get_dplls hook: compute the hw state for HDMI or DP, then take the
 * PLL that is hardwired to the encoder's port (no sharing on BXT).
 */
1906 static bool bxt_get_dpll(struct intel_atomic_state *state,
1907 struct intel_crtc *crtc,
1908 struct intel_encoder *encoder)
1910 struct intel_crtc_state *crtc_state =
1911 intel_atomic_get_new_crtc_state(state, crtc);
1912 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1913 struct intel_shared_dpll *pll;
1914 enum intel_dpll_id id;
1916 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
1917 !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
1920 if (intel_crtc_has_dp_encoder(crtc_state) &&
1921 !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
1924 /* 1:1 mapping between ports and PLLs */
1925 id = (enum intel_dpll_id) encoder->port;
1926 pll = intel_get_shared_dpll_by_id(dev_priv, id);
1928 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
1929 crtc->base.base.id, crtc->base.name, pll->info->name);
1931 intel_reference_shared_dpll(state, crtc,
1932 pll, &crtc_state->dpll_hw_state);
1934 crtc_state->shared_dpll = pll;
/*
 * Debug helper: print the BXT PLL hw state fields.
 * NOTE(review): the adjacent string literals "...ebb4: 0x%x," and
 * "pll0: 0x%x..." concatenate without a separating space — the log line
 * reads "ebb4: 0x%x,pll0: ...". Candidate cosmetic fix (string change,
 * so not done in this comment-only update).
 */
1939 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
1940 const struct intel_dpll_hw_state *hw_state)
1942 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
1943 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
1944 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
/* Ops shared by all BXT port PLLs. */
1958 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1959 .enable = bxt_ddi_pll_enable,
1960 .disable = bxt_ddi_pll_disable,
1961 .get_hw_state = bxt_ddi_pll_get_hw_state,
/*
 * Per-platform DPLL manager: the table of available PLLs plus the hooks
 * used by the atomic modeset code to reserve (get), release (put) and,
 * where supported, retarget (update_active) DPLLs, and to dump hw state.
 */
1964 struct intel_dpll_mgr {
1965 const struct dpll_info *dpll_info;
/* Reserve the PLL(s) needed by this crtc/encoder combination. */
1967 bool (*get_dplls)(struct intel_atomic_state *state,
1968 struct intel_crtc *crtc,
1969 struct intel_encoder *encoder);
/* Drop the reservations taken by get_dplls. */
1970 void (*put_dplls)(struct intel_atomic_state *state,
1971 struct intel_crtc *crtc);
/* Optional: switch the active PLL (used by TC ports). */
1972 void (*update_active_dpll)(struct intel_atomic_state *state,
1973 struct intel_crtc *crtc,
1974 struct intel_encoder *encoder);
1975 void (*dump_hw_state)(struct drm_i915_private *dev_priv,
1976 const struct intel_dpll_hw_state *hw_state);
/* PCH (IBX/CPT) platforms: two shareable PCH DPLLs. */
1979 static const struct dpll_info pch_plls[] = {
1980 { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
1981 { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
1985 static const struct intel_dpll_mgr pch_pll_mgr = {
1986 .dpll_info = pch_plls,
1987 .get_dplls = ibx_get_dpll,
1988 .put_dplls = intel_put_dpll,
1989 .dump_hw_state = ibx_dump_hw_state,
/* HSW/BDW: WRPLLs and SPLL plus the fixed always-on LCPLL rates. */
1992 static const struct dpll_info hsw_plls[] = {
1993 { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
1994 { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
1995 { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
1996 { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
1997 { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1998 { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
2002 static const struct intel_dpll_mgr hsw_pll_mgr = {
2003 .dpll_info = hsw_plls,
2004 .get_dplls = hsw_get_dpll,
2005 .put_dplls = intel_put_dpll,
2006 .dump_hw_state = hsw_dump_hw_state,
/* SKL/KBL: DPLL0 is always on (drives CDCLK); DPLL1-3 are shareable. */
2009 static const struct dpll_info skl_plls[] = {
2010 { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
2011 { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2012 { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2013 { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
2017 static const struct intel_dpll_mgr skl_pll_mgr = {
2018 .dpll_info = skl_plls,
2019 .get_dplls = skl_get_dpll,
2020 .put_dplls = intel_put_dpll,
2021 .dump_hw_state = skl_dump_hw_state,
/* BXT/GLK: one PLL per port (A/B/C), no sharing. */
2024 static const struct dpll_info bxt_plls[] = {
2025 { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2026 { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2027 { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2031 static const struct intel_dpll_mgr bxt_pll_mgr = {
2032 .dpll_info = bxt_plls,
2033 .get_dplls = bxt_get_dpll,
2034 .put_dplls = intel_put_dpll,
2035 .dump_hw_state = bxt_dump_hw_state,
/*
 * CNL DPLL enable, following the documented numbered hardware sequence:
 * power up, program CFGCR0/CFGCR1 from the cached sw state, enable, and
 * wait for lock. DVFS steps (5/8) are handled by the cdclk code.
 */
2038 static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
2039 struct intel_shared_dpll *pll)
2041 const enum intel_dpll_id id = pll->info->id;
2044 /* 1. Enable DPLL power in DPLL_ENABLE. */
2045 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2046 val |= PLL_POWER_ENABLE;
2047 intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2049 /* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
2050 if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
2051 PLL_POWER_STATE, 5))
2052 drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);
2055 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
2056 * select DP mode, and set DP link rate.
2058 val = pll->state.hw_state.cfgcr0;
2059 intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);
2061 /* 4. Read back to ensure writes completed */
2062 intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));
2064 /* 3. Configure DPLL_CFGCR0 */
2065 /* Avoid touch CFGCR1 if HDMI mode is not enabled */
2066 if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
2067 val = pll->state.hw_state.cfgcr1;
2068 intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
2069 /* 4. Read back to ensure writes completed */
2070 intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
2074 * 5. If the frequency will result in a change to the voltage
2075 * requirement, follow the Display Voltage Frequency Switching
2076 * Sequence Before Frequency Change
2078 * Note: DVFS is actually handled via the cdclk code paths,
2079 * hence we do nothing here.
2082 /* 6. Enable DPLL in DPLL_ENABLE. */
2083 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2085 intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val)
2087 /* 7. Wait for PLL lock status in DPLL_ENABLE. */
2088 if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2089 drm_err(&dev_priv->drm, "PLL %d not locked\n", id);
2092 * 8. If the frequency will result in a change to the voltage
2093 * requirement, follow the Display Voltage Frequency Switching
2094 * Sequence After Frequency Change
2096 * Note: DVFS is actually handled via the cdclk code paths,
2097 * hence we do nothing here.
2101 * 9. turn on the clock for the DDI and map the DPLL to the DDI
2102 * Done at intel_ddi_clk_select
/*
 * CNL DPLL disable: mirror of the enable sequence — disable the PLL,
 * wait for unlock, then drop PLL power and wait for the power state to
 * clear. Clock gating (step 1) is done in intel_ddi_post_disable and
 * DVFS (steps 2/5) in the cdclk code.
 */
2106 static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
2107 struct intel_shared_dpll *pll)
2109 const enum intel_dpll_id id = pll->info->id;
2113 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
2114 * Done at intel_ddi_post_disable
2118 * 2. If the frequency will result in a change to the voltage
2119 * requirement, follow the Display Voltage Frequency Switching
2120 * Sequence Before Frequency Change
2122 * Note: DVFS is actually handled via the cdclk code paths,
2123 * hence we do nothing here.
2126 /* 3. Disable DPLL through DPLL_ENABLE. */
2127 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2129 intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2131 /* 4. Wait for PLL not locked status in DPLL_ENABLE. */
2132 if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2133 drm_err(&dev_priv->drm, "PLL %d locked\n", id);
2136 * 5. If the frequency will result in a change to the voltage
2137 * requirement, follow the Display Voltage Frequency Switching
2138 * Sequence After Frequency Change
2140 * Note: DVFS is actually handled via the cdclk code paths,
2141 * hence we do nothing here.
2144 /* 6. Disable DPLL power in DPLL_ENABLE. */
2145 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2146 val &= ~PLL_POWER_ENABLE;
2147 intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2149 /* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
2150 if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
2151 PLL_POWER_STATE, 5))
2152 drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
/*
 * Read back the CNL DPLL configuration (CFGCR0, and CFGCR1 only when HDMI
 * mode is set, to avoid stale values) under a conditional display-core
 * power reference.
 */
2155 static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2156 struct intel_shared_dpll *pll,
2157 struct intel_dpll_hw_state *hw_state)
2159 const enum intel_dpll_id id = pll->info->id;
2160 intel_wakeref_t wakeref;
/* Only proceed if the display core domain is already powered. */
2164 wakeref = intel_display_power_get_if_enabled(dev_priv,
2165 POWER_DOMAIN_DISPLAY_CORE);
2171 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2172 if (!(val & PLL_ENABLE))
2175 val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
2176 hw_state->cfgcr0 = val;
2178 /* avoid reading back stale values if HDMI mode is not enabled */
2179 if (val & DPLL_CFGCR0_HDMI_MODE) {
2180 hw_state->cfgcr1 = intel_de_read(dev_priv,
2181 CNL_DPLL_CFGCR1(id));
2186 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * Decompose the overall divider into the hardware's P (pdiv), Q (qdiv)
 * and K (kdiv) factors. Even dividers are factored as products of
 * 2/4/6/10/14; odd dividers use K=1 with P covering 3/5/7 (or P*3 for
 * 9/15/21).
 * NOTE(review): this listing is missing the nested branch structure and
 * several assignments, so the visible chain is not the complete logic.
 */
2191 static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2192 int *qdiv, int *kdiv)
2195 if (bestdiv % 2 == 0) {
2200 } else if (bestdiv % 4 == 0) {
2202 *qdiv = bestdiv / 4;
2204 } else if (bestdiv % 6 == 0) {
2206 *qdiv = bestdiv / 6;
2208 } else if (bestdiv % 5 == 0) {
2210 *qdiv = bestdiv / 10;
2212 } else if (bestdiv % 14 == 0) {
2214 *qdiv = bestdiv / 14;
2218 if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2222 } else { /* 9, 15, 21 */
2223 *pdiv = bestdiv / 3;
/*
 * Encode pdiv/qdiv/kdiv and the DCO frequency into skl_wrpll_params.
 * The DCO is stored as a 15-bit fixed-point fraction of the reference
 * clock: integer part in dco_integer, fraction in dco_fraction.
 */
2230 static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2231 u32 dco_freq, u32 ref_freq,
2232 int pdiv, int qdiv, int kdiv)
2247 WARN(1, "Incorrect KDiv\n");
2264 WARN(1, "Incorrect PDiv\n");
/* Q divider is only meaningful when K == 2. */
2267 WARN_ON(kdiv != 2 && qdiv != 1);
2269 params->qdiv_ratio = qdiv;
2270 params->qdiv_mode = (qdiv == 1) ? 0 : 1;
/* dco_freq / ref_freq in 17.15 fixed point. */
2272 dco = div_u64((u64)dco_freq << 15, ref_freq);
2274 params->dco_integer = dco >> 15;
2275 params->dco_fraction = dco & 0x7fff;
/*
 * Return the reference clock (kHz) to use for the HDMI WRPLL
 * calculation. On gen11+ a 38.4 MHz reference is treated as 19.2 MHz
 * because the DPLL divides it by two internally.
 */
2278 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
2280 int ref_clock = dev_priv->cdclk.hw.ref;
2283 * For ICL+, the spec states: if reference frequency is 38.4,
2284 * use 19.2 because the DPLL automatically divides that by 2.
2286 if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
/*
 * Choose WRPLL parameters for CNL (also reused by ICL HDMI/DSI): scan the
 * allowed dividers, keep the one whose DCO (afe_clock * divider) lands in
 * [dco_min, dco_max] closest to the midpoint, then decompose it into
 * p/q/k multipliers and populate *wrpll_params.
 */
2293 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2294 struct skl_wrpll_params *wrpll_params)
2296 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
/* AFE clock is 5x the port (link symbol) clock. */
2297 u32 afe_clock = crtc_state->port_clock * 5;
2299 u32 dco_min = 7998000;
2300 u32 dco_max = 10000000;
2301 u32 dco_mid = (dco_min + dco_max) / 2;
/* Even dividers first (preferred), odd dividers last. */
2302 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2303 18, 20, 24, 28, 30, 32, 36, 40,
2304 42, 44, 48, 50, 52, 54, 56, 60,
2305 64, 66, 68, 70, 72, 76, 78, 80,
2306 84, 88, 90, 92, 96, 98, 100, 102,
2307 3, 5, 7, 9, 15, 21 };
2308 u32 dco, best_dco = 0, dco_centrality = 0;
2309 u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2310 int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2312 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2313 dco = afe_clock * dividers[d];
2315 if ((dco <= dco_max) && (dco >= dco_min)) {
2316 dco_centrality = abs(dco - dco_mid);
2318 if (dco_centrality < best_dco_centrality) {
2319 best_dco_centrality = dco_centrality;
2320 best_div = dividers[d];
2329 cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2331 ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
2333 cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
/*
 * Fill crtc_state->dpll_hw_state (cfgcr0/cfgcr1) for a CNL HDMI output
 * from the calculated WRPLL parameters.
 */
2339 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2342 struct skl_wrpll_params wrpll_params = { 0, };
2344 cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2346 if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
/* DCO value goes into CFGCR0 alongside the HDMI-mode bit. */
2349 cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2350 wrpll_params.dco_integer;
/* Divider settings go into CFGCR1 with the central frequency. */
2352 cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2353 DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2354 DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2355 DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2356 DPLL_CFGCR1_CENTRAL_FREQ;
2358 memset(&crtc_state->dpll_hw_state, 0,
2359 sizeof(crtc_state->dpll_hw_state));
2361 crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2362 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
/*
 * Program cfgcr0 for a CNL DP output: SSC plus the fixed link-rate code
 * selected from half the port clock. Case labels are missing from this
 * listing; only the rate assignments are visible.
 */
2367 cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2371 cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2373 switch (crtc_state->port_clock / 2) {
2375 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2378 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2381 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2385 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2388 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2391 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2394 /* Some SKUs may require elevated I/O voltage to support this */
2395 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2398 /* Some SKUs may require elevated I/O voltage to support this */
2399 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2403 memset(&crtc_state->dpll_hw_state, 0,
2404 sizeof(crtc_state->dpll_hw_state));
2406 crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
/*
 * CNL .get_dplls hook: compute the hw state for HDMI or DP (other output
 * types skip PLL setup), then reserve any of DPLL0-2.
 * NOTE(review): failure-path braces/returns are missing from this listing.
 */
2411 static bool cnl_get_dpll(struct intel_atomic_state *state,
2412 struct intel_crtc *crtc,
2413 struct intel_encoder *encoder)
2415 struct intel_crtc_state *crtc_state =
2416 intel_atomic_get_new_crtc_state(state, crtc);
2417 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2418 struct intel_shared_dpll *pll;
2421 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2422 bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
2424 drm_dbg_kms(&i915->drm,
2425 "Could not get HDMI pll dividers.\n");
2428 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
2429 bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
2431 drm_dbg_kms(&i915->drm,
2432 "Could not set DP dpll HW state.\n");
2436 drm_dbg_kms(&i915->drm,
2437 "Skip DPLL setup for output_types 0x%x\n",
2438 crtc_state->output_types);
/* Any of DPLL0..2 may serve this CRTC. */
2442 pll = intel_find_shared_dpll(state, crtc,
2443 &crtc_state->dpll_hw_state,
2444 BIT(DPLL_ID_SKL_DPLL2) |
2445 BIT(DPLL_ID_SKL_DPLL1) |
2446 BIT(DPLL_ID_SKL_DPLL0));
2448 drm_dbg_kms(&i915->drm, "No PLL selected\n");
2452 intel_reference_shared_dpll(state, crtc,
2453 pll, &crtc_state->dpll_hw_state);
2455 crtc_state->shared_dpll = pll;
/* Debug helper: print the CNL cfgcr0/cfgcr1 hw state values. */
2460 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
2461 const struct intel_dpll_hw_state *hw_state)
2463 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
2464 "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
/* Ops shared by all CNL DPLLs. */
2469 static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
2470 .enable = cnl_ddi_pll_enable,
2471 .disable = cnl_ddi_pll_disable,
2472 .get_hw_state = cnl_ddi_pll_get_hw_state,
/* CNL: three shareable DPLLs. */
2475 static const struct dpll_info cnl_plls[] = {
2476 { "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2477 { "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2478 { "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2482 static const struct intel_dpll_mgr cnl_pll_mgr = {
2483 .dpll_info = cnl_plls,
2484 .get_dplls = cnl_get_dpll,
2485 .put_dplls = intel_put_dpll,
2486 .dump_hw_state = cnl_dump_hw_state,
/* Link-rate keyed entry pairing a clock with precomputed WRPLL values. */
2489 struct icl_combo_pll_params {
2491 struct skl_wrpll_params wrpll;
2495 * These values are already adjusted: they're the bits we write to the
2496 * registers, not the logical values.
/*
 * Precomputed ICL combo-PHY DP PLL values for a 24 MHz reference.
 * Each entry's trailing comment gives the link rate in GHz; pdiv is the
 * encoded register value with the logical divider in the comment.
 */
2498 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2500 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
2501 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2503 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
2504 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2506 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
2507 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2509 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
2510 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2512 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
2513 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2515 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
2516 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2518 { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
2519 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2521 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
2522 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2526 /* Also used for 38.4 MHz values. */
2527 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2529 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
2530 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2532 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
2533 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2535 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
2536 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2538 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
2539 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2541 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
2542 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2544 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
2545 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2547 { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
2548 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2550 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
2551 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
/* ICL Thunderbolt PLL values, one set per reference clock. */
2554 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2555 .dco_integer = 0x151, .dco_fraction = 0x4000,
2556 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2559 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2560 .dco_integer = 0x1A5, .dco_fraction = 0x7000,
2561 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
/* TGL Thunderbolt PLL values; only the DCO fields are consumed. */
2564 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2565 .dco_integer = 0x54, .dco_fraction = 0x3000,
2566 /* the following params are unused */
2567 .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2570 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2571 .dco_integer = 0x43, .dco_fraction = 0x4000,
2572 /* the following params are unused */
2573 .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2576 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2577 struct skl_wrpll_params *pll_params)
2579 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2580 const struct icl_combo_pll_params *params =
2581 dev_priv->cdclk.hw.ref == 24000 ?
2582 icl_dp_combo_pll_24MHz_values :
2583 icl_dp_combo_pll_19_2MHz_values;
2584 int clock = crtc_state->port_clock;
2587 for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2588 if (clock == params[i].clock) {
2589 *pll_params = params[i].wrpll;
2594 MISSING_CASE(clock);
2598 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2599 struct skl_wrpll_params *pll_params)
2601 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2603 if (INTEL_GEN(dev_priv) >= 12) {
2604 switch (dev_priv->cdclk.hw.ref) {
2606 MISSING_CASE(dev_priv->cdclk.hw.ref);
2610 *pll_params = tgl_tbt_pll_19_2MHz_values;
2613 *pll_params = tgl_tbt_pll_24MHz_values;
2617 switch (dev_priv->cdclk.hw.ref) {
2619 MISSING_CASE(dev_priv->cdclk.hw.ref);
2623 *pll_params = icl_tbt_pll_19_2MHz_values;
2626 *pll_params = icl_tbt_pll_24MHz_values;
2634 static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
2635 struct intel_encoder *encoder,
2636 struct intel_dpll_hw_state *pll_state)
2638 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2640 struct skl_wrpll_params pll_params = { 0 };
2643 if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv,
2645 ret = icl_calc_tbt_pll(crtc_state, &pll_params);
2646 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
2647 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
2648 ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
2650 ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
2655 cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
2656 pll_params.dco_integer;
2658 cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
2659 DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
2660 DPLL_CFGCR1_KDIV(pll_params.kdiv) |
2661 DPLL_CFGCR1_PDIV(pll_params.pdiv);
2663 if (INTEL_GEN(dev_priv) >= 12)
2664 cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2666 cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2668 memset(pll_state, 0, sizeof(*pll_state));
2670 pll_state->cfgcr0 = cfgcr0;
2671 pll_state->cfgcr1 = cfgcr1;
2677 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
2679 return id - DPLL_ID_ICL_MGPLL1;
2682 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
2684 return tc_port + DPLL_ID_ICL_MGPLL1;
2687 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2688 u32 *target_dco_khz,
2689 struct intel_dpll_hw_state *state,
2692 u32 dco_min_freq, dco_max_freq;
2693 int div1_vals[] = {7, 5, 3, 2};
2697 dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2698 dco_max_freq = is_dp ? 8100000 : 10000000;
2700 for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2701 int div1 = div1_vals[i];
2703 for (div2 = 10; div2 > 0; div2--) {
2704 int dco = div1 * div2 * clock_khz * 5;
2705 int a_divratio, tlinedrv, inputsel;
2708 if (dco < dco_min_freq || dco > dco_max_freq)
2713 * Note: a_divratio not matching TGL BSpec
2714 * algorithm but matching hardcoded values and
2715 * working on HW for DP alt-mode at least
2717 a_divratio = is_dp ? 10 : 5;
2718 tlinedrv = is_dkl ? 1 : 2;
2723 inputsel = is_dp ? 0 : 1;
2730 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2733 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2736 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2739 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2743 *target_dco_khz = dco;
2745 state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2747 state->mg_clktop2_coreclkctl1 =
2748 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2750 state->mg_clktop2_hsclkctl =
2751 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2752 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2754 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2764 * The specification for this function uses real numbers, so the math had to be
2765 * adapted to integer-only calculation, that's why it looks so different.
2767 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2768 struct intel_dpll_hw_state *pll_state)
2770 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2771 int refclk_khz = dev_priv->cdclk.hw.ref;
2772 int clock = crtc_state->port_clock;
2773 u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2774 u32 iref_ndiv, iref_trim, iref_pulse_w;
2775 u32 prop_coeff, int_coeff;
2776 u32 tdc_targetcnt, feedfwgain;
2777 u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2779 bool use_ssc = false;
2780 bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2781 bool is_dkl = INTEL_GEN(dev_priv) >= 12;
2783 memset(pll_state, 0, sizeof(*pll_state));
2785 if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2786 pll_state, is_dkl)) {
2787 drm_dbg_kms(&dev_priv->drm,
2788 "Failed to find divisors for clock %d\n", clock);
2793 m2div_int = dco_khz / (refclk_khz * m1div);
2794 if (m2div_int > 255) {
2797 m2div_int = dco_khz / (refclk_khz * m1div);
2800 if (m2div_int > 255) {
2801 drm_dbg_kms(&dev_priv->drm,
2802 "Failed to find mdiv for clock %d\n",
2807 m2div_rem = dco_khz % (refclk_khz * m1div);
2809 tmp = (u64)m2div_rem * (1 << 22);
2810 do_div(tmp, refclk_khz * m1div);
2813 switch (refclk_khz) {
2830 MISSING_CASE(refclk_khz);
2835 * tdc_res = 0.000003
2836 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2838 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2839 * was supposed to be a division, but we rearranged the operations of
2840 * the formula to avoid early divisions so we don't multiply the
2843 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2844 * we also rearrange to work with integers.
2846 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2847 * last division by 10.
2849 tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2852 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2853 * 32 bits. That's not a problem since we round the division down
2856 feedfwgain = (use_ssc || m2div_rem > 0) ?
2857 m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2859 if (dco_khz >= 9000000) {
2868 tmp = mul_u32_u32(dco_khz, 47 * 32);
2869 do_div(tmp, refclk_khz * m1div * 10000);
2872 tmp = mul_u32_u32(dco_khz, 1000);
2873 ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2880 /* write pll_state calculations */
2882 pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2883 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2884 DKL_PLL_DIV0_FBPREDIV(m1div) |
2885 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2887 pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2888 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2890 pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2891 DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2892 DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2893 (use_ssc ? DKL_PLL_SSC_EN : 0);
2895 pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2896 DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2898 pll_state->mg_pll_tdc_coldst_bias =
2899 DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2900 DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2903 pll_state->mg_pll_div0 =
2904 (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2905 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2906 MG_PLL_DIV0_FBDIV_INT(m2div_int);
2908 pll_state->mg_pll_div1 =
2909 MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2910 MG_PLL_DIV1_DITHER_DIV_2 |
2911 MG_PLL_DIV1_NDIVRATIO(1) |
2912 MG_PLL_DIV1_FBPREDIV(m1div);
2914 pll_state->mg_pll_lf =
2915 MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2916 MG_PLL_LF_AFCCNTSEL_512 |
2917 MG_PLL_LF_GAINCTRL(1) |
2918 MG_PLL_LF_INT_COEFF(int_coeff) |
2919 MG_PLL_LF_PROP_COEFF(prop_coeff);
2921 pll_state->mg_pll_frac_lock =
2922 MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2923 MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2924 MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2925 MG_PLL_FRAC_LOCK_DCODITHEREN |
2926 MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2927 if (use_ssc || m2div_rem > 0)
2928 pll_state->mg_pll_frac_lock |=
2929 MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2931 pll_state->mg_pll_ssc =
2932 (use_ssc ? MG_PLL_SSC_EN : 0) |
2933 MG_PLL_SSC_TYPE(2) |
2934 MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2935 MG_PLL_SSC_STEPNUM(ssc_steplog) |
2937 MG_PLL_SSC_STEPSIZE(ssc_stepsize);
2939 pll_state->mg_pll_tdc_coldst_bias =
2940 MG_PLL_TDC_COLDST_COLDSTART |
2941 MG_PLL_TDC_COLDST_IREFINT_EN |
2942 MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
2943 MG_PLL_TDC_TDCOVCCORR_EN |
2944 MG_PLL_TDC_TDCSEL(3);
2946 pll_state->mg_pll_bias =
2947 MG_PLL_BIAS_BIAS_GB_SEL(3) |
2948 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
2949 MG_PLL_BIAS_BIAS_BONUS(10) |
2950 MG_PLL_BIAS_BIASCAL_EN |
2951 MG_PLL_BIAS_CTRIM(12) |
2952 MG_PLL_BIAS_VREF_RDAC(4) |
2953 MG_PLL_BIAS_IREFTRIM(iref_trim);
2955 if (refclk_khz == 38400) {
2956 pll_state->mg_pll_tdc_coldst_bias_mask =
2957 MG_PLL_TDC_COLDST_COLDSTART;
2958 pll_state->mg_pll_bias_mask = 0;
2960 pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
2961 pll_state->mg_pll_bias_mask = -1U;
2964 pll_state->mg_pll_tdc_coldst_bias &=
2965 pll_state->mg_pll_tdc_coldst_bias_mask;
2966 pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
2973 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
2974 * @crtc_state: state for the CRTC to select the DPLL for
2975 * @port_dpll_id: the active @port_dpll_id to select
2977 * Select the given @port_dpll_id instance from the DPLLs reserved for the
2980 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
2981 enum icl_port_dpll_id port_dpll_id)
2983 struct icl_port_dpll *port_dpll =
2984 &crtc_state->icl_port_dplls[port_dpll_id];
2986 crtc_state->shared_dpll = port_dpll->pll;
2987 crtc_state->dpll_hw_state = port_dpll->hw_state;
2990 static void icl_update_active_dpll(struct intel_atomic_state *state,
2991 struct intel_crtc *crtc,
2992 struct intel_encoder *encoder)
2994 struct intel_crtc_state *crtc_state =
2995 intel_atomic_get_new_crtc_state(state, crtc);
2996 struct intel_digital_port *primary_port;
2997 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
2999 primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3000 enc_to_mst(encoder)->primary :
3001 enc_to_dig_port(encoder);
3004 (primary_port->tc_mode == TC_PORT_DP_ALT ||
3005 primary_port->tc_mode == TC_PORT_LEGACY))
3006 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3008 icl_set_active_port_dpll(crtc_state, port_dpll_id);
3011 static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3012 struct intel_crtc *crtc,
3013 struct intel_encoder *encoder)
3015 struct intel_crtc_state *crtc_state =
3016 intel_atomic_get_new_crtc_state(state, crtc);
3017 struct icl_port_dpll *port_dpll =
3018 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3019 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3020 enum port port = encoder->port;
3021 unsigned long dpll_mask;
3023 if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
3024 drm_dbg_kms(&dev_priv->drm,
3025 "Could not calculate combo PHY PLL state.\n");
3030 if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
3032 BIT(DPLL_ID_EHL_DPLL4) |
3033 BIT(DPLL_ID_ICL_DPLL1) |
3034 BIT(DPLL_ID_ICL_DPLL0);
3036 dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3038 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3039 &port_dpll->hw_state,
3041 if (!port_dpll->pll) {
3042 drm_dbg_kms(&dev_priv->drm,
3043 "No combo PHY PLL found for [ENCODER:%d:%s]\n",
3044 encoder->base.base.id, encoder->base.name);
3048 intel_reference_shared_dpll(state, crtc,
3049 port_dpll->pll, &port_dpll->hw_state);
3051 icl_update_active_dpll(state, crtc, encoder);
3056 static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3057 struct intel_crtc *crtc,
3058 struct intel_encoder *encoder)
3060 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3061 struct intel_crtc_state *crtc_state =
3062 intel_atomic_get_new_crtc_state(state, crtc);
3063 struct icl_port_dpll *port_dpll;
3064 enum intel_dpll_id dpll_id;
3066 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3067 if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
3068 drm_dbg_kms(&dev_priv->drm,
3069 "Could not calculate TBT PLL state.\n");
3073 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3074 &port_dpll->hw_state,
3075 BIT(DPLL_ID_ICL_TBTPLL));
3076 if (!port_dpll->pll) {
3077 drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
3080 intel_reference_shared_dpll(state, crtc,
3081 port_dpll->pll, &port_dpll->hw_state);
3084 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3085 if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
3086 drm_dbg_kms(&dev_priv->drm,
3087 "Could not calculate MG PHY PLL state.\n");
3088 goto err_unreference_tbt_pll;
3091 dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3093 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3094 &port_dpll->hw_state,
3096 if (!port_dpll->pll) {
3097 drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
3098 goto err_unreference_tbt_pll;
3100 intel_reference_shared_dpll(state, crtc,
3101 port_dpll->pll, &port_dpll->hw_state);
3103 icl_update_active_dpll(state, crtc, encoder);
3107 err_unreference_tbt_pll:
3108 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3109 intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3114 static bool icl_get_dplls(struct intel_atomic_state *state,
3115 struct intel_crtc *crtc,
3116 struct intel_encoder *encoder)
3118 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3119 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3121 if (intel_phy_is_combo(dev_priv, phy))
3122 return icl_get_combo_phy_dpll(state, crtc, encoder);
3123 else if (intel_phy_is_tc(dev_priv, phy))
3124 return icl_get_tc_phy_dplls(state, crtc, encoder);
3131 static void icl_put_dplls(struct intel_atomic_state *state,
3132 struct intel_crtc *crtc)
3134 const struct intel_crtc_state *old_crtc_state =
3135 intel_atomic_get_old_crtc_state(state, crtc);
3136 struct intel_crtc_state *new_crtc_state =
3137 intel_atomic_get_new_crtc_state(state, crtc);
3138 enum icl_port_dpll_id id;
3140 new_crtc_state->shared_dpll = NULL;
3142 for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3143 const struct icl_port_dpll *old_port_dpll =
3144 &old_crtc_state->icl_port_dplls[id];
3145 struct icl_port_dpll *new_port_dpll =
3146 &new_crtc_state->icl_port_dplls[id];
3148 new_port_dpll->pll = NULL;
3150 if (!old_port_dpll->pll)
3153 intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3157 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
3158 struct intel_shared_dpll *pll,
3159 struct intel_dpll_hw_state *hw_state)
3161 const enum intel_dpll_id id = pll->info->id;
3162 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3163 intel_wakeref_t wakeref;
3167 wakeref = intel_display_power_get_if_enabled(dev_priv,
3168 POWER_DOMAIN_DISPLAY_CORE);
3172 val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
3173 if (!(val & PLL_ENABLE))
3176 hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
3177 MG_REFCLKIN_CTL(tc_port));
3178 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3180 hw_state->mg_clktop2_coreclkctl1 =
3181 intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3182 hw_state->mg_clktop2_coreclkctl1 &=
3183 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3185 hw_state->mg_clktop2_hsclkctl =
3186 intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3187 hw_state->mg_clktop2_hsclkctl &=
3188 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3189 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3190 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3191 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3193 hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
3194 hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
3195 hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
3196 hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
3197 MG_PLL_FRAC_LOCK(tc_port));
3198 hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));
3200 hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3201 hw_state->mg_pll_tdc_coldst_bias =
3202 intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3204 if (dev_priv->cdclk.hw.ref == 38400) {
3205 hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3206 hw_state->mg_pll_bias_mask = 0;
3208 hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3209 hw_state->mg_pll_bias_mask = -1U;
3212 hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3213 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3217 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3221 static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3222 struct intel_shared_dpll *pll,
3223 struct intel_dpll_hw_state *hw_state)
3225 const enum intel_dpll_id id = pll->info->id;
3226 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3227 intel_wakeref_t wakeref;
3231 wakeref = intel_display_power_get_if_enabled(dev_priv,
3232 POWER_DOMAIN_DISPLAY_CORE);
3236 val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
3237 if (!(val & PLL_ENABLE))
3241 * All registers read here have the same HIP_INDEX_REG even though
3242 * they are on different building blocks
3244 intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
3245 HIP_INDEX_VAL(tc_port, 0x2));
3247 hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
3248 DKL_REFCLKIN_CTL(tc_port));
3249 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3251 hw_state->mg_clktop2_hsclkctl =
3252 intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3253 hw_state->mg_clktop2_hsclkctl &=
3254 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3255 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3256 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3257 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3259 hw_state->mg_clktop2_coreclkctl1 =
3260 intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3261 hw_state->mg_clktop2_coreclkctl1 &=
3262 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3264 hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
3265 hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
3266 DKL_PLL_DIV0_PROP_COEFF_MASK |
3267 DKL_PLL_DIV0_FBPREDIV_MASK |
3268 DKL_PLL_DIV0_FBDIV_INT_MASK);
3270 hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
3271 hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3272 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3274 hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
3275 hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3276 DKL_PLL_SSC_STEP_LEN_MASK |
3277 DKL_PLL_SSC_STEP_NUM_MASK |
3280 hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
3281 hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3282 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3284 hw_state->mg_pll_tdc_coldst_bias =
3285 intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3286 hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3287 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3291 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3295 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3296 struct intel_shared_dpll *pll,
3297 struct intel_dpll_hw_state *hw_state,
3298 i915_reg_t enable_reg)
3300 const enum intel_dpll_id id = pll->info->id;
3301 intel_wakeref_t wakeref;
3305 wakeref = intel_display_power_get_if_enabled(dev_priv,
3306 POWER_DOMAIN_DISPLAY_CORE);
3310 val = intel_de_read(dev_priv, enable_reg);
3311 if (!(val & PLL_ENABLE))
3314 if (INTEL_GEN(dev_priv) >= 12) {
3315 hw_state->cfgcr0 = intel_de_read(dev_priv,
3316 TGL_DPLL_CFGCR0(id));
3317 hw_state->cfgcr1 = intel_de_read(dev_priv,
3318 TGL_DPLL_CFGCR1(id));
3320 if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3321 hw_state->cfgcr0 = intel_de_read(dev_priv,
3322 ICL_DPLL_CFGCR0(4));
3323 hw_state->cfgcr1 = intel_de_read(dev_priv,
3324 ICL_DPLL_CFGCR1(4));
3326 hw_state->cfgcr0 = intel_de_read(dev_priv,
3327 ICL_DPLL_CFGCR0(id));
3328 hw_state->cfgcr1 = intel_de_read(dev_priv,
3329 ICL_DPLL_CFGCR1(id));
3335 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3339 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3340 struct intel_shared_dpll *pll,
3341 struct intel_dpll_hw_state *hw_state)
3343 i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3345 if (IS_ELKHARTLAKE(dev_priv) &&
3346 pll->info->id == DPLL_ID_EHL_DPLL4) {
3347 enable_reg = MG_PLL_ENABLE(0);
3350 return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3353 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3354 struct intel_shared_dpll *pll,
3355 struct intel_dpll_hw_state *hw_state)
3357 return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3360 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3361 struct intel_shared_dpll *pll)
3363 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3364 const enum intel_dpll_id id = pll->info->id;
3365 i915_reg_t cfgcr0_reg, cfgcr1_reg;
3367 if (INTEL_GEN(dev_priv) >= 12) {
3368 cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3369 cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3371 if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3372 cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3373 cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3375 cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3376 cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3380 intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
3381 intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
3382 intel_de_posting_read(dev_priv, cfgcr1_reg);
3385 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3386 struct intel_shared_dpll *pll)
3388 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3389 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3393 * Some of the following registers have reserved fields, so program
3394 * these with RMW based on a mask. The mask can be fixed or generated
3395 * during the calc/readout phase if the mask depends on some other HW
3396 * state like refclk, see icl_calc_mg_pll_state().
3398 val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
3399 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3400 val |= hw_state->mg_refclkin_ctl;
3401 intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
3403 val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3404 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3405 val |= hw_state->mg_clktop2_coreclkctl1;
3406 intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
3408 val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3409 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3410 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3411 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3412 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3413 val |= hw_state->mg_clktop2_hsclkctl;
3414 intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
3416 intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3417 intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3418 intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3419 intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
3420 hw_state->mg_pll_frac_lock);
3421 intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3423 val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3424 val &= ~hw_state->mg_pll_bias_mask;
3425 val |= hw_state->mg_pll_bias;
3426 intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
3428 val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3429 val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
3430 val |= hw_state->mg_pll_tdc_coldst_bias;
3431 intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
3433 intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3436 static void dkl_pll_write(struct drm_i915_private *dev_priv,
3437 struct intel_shared_dpll *pll)
3439 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3440 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3444 * All registers programmed here have the same HIP_INDEX_REG even
3445 * though on different building block
3447 intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
3448 HIP_INDEX_VAL(tc_port, 0x2));
3450 /* All the registers are RMW */
3451 val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
3452 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3453 val |= hw_state->mg_refclkin_ctl;
3454 intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
3456 val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3457 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3458 val |= hw_state->mg_clktop2_coreclkctl1;
3459 intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3461 val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3462 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3463 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3464 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3465 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3466 val |= hw_state->mg_clktop2_hsclkctl;
3467 intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3469 val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
3470 val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
3471 DKL_PLL_DIV0_PROP_COEFF_MASK |
3472 DKL_PLL_DIV0_FBPREDIV_MASK |
3473 DKL_PLL_DIV0_FBDIV_INT_MASK);
3474 val |= hw_state->mg_pll_div0;
3475 intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);
3477 val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
3478 val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3479 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3480 val |= hw_state->mg_pll_div1;
3481 intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
3483 val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
3484 val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3485 DKL_PLL_SSC_STEP_LEN_MASK |
3486 DKL_PLL_SSC_STEP_NUM_MASK |
3488 val |= hw_state->mg_pll_ssc;
3489 intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);
3491 val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
3492 val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3493 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3494 val |= hw_state->mg_pll_bias;
3495 intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
3497 val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3498 val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3499 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3500 val |= hw_state->mg_pll_tdc_coldst_bias;
3501 intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3503 intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3506 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3507 struct intel_shared_dpll *pll,
3508 i915_reg_t enable_reg)
3512 val = intel_de_read(dev_priv, enable_reg);
3513 val |= PLL_POWER_ENABLE;
3514 intel_de_write(dev_priv, enable_reg, val);
3517 * The spec says we need to "wait" but it also says it should be
3520 if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3521 drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
3525 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3526 struct intel_shared_dpll *pll,
3527 i915_reg_t enable_reg)
3531 val = intel_de_read(dev_priv, enable_reg);
3533 intel_de_write(dev_priv, enable_reg, val);
3535 /* Timeout is actually 600us. */
3536 if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3537 drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
3540 static void combo_pll_enable(struct drm_i915_private *dev_priv,
3541 struct intel_shared_dpll *pll)
3543 i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3545 if (IS_ELKHARTLAKE(dev_priv) &&
3546 pll->info->id == DPLL_ID_EHL_DPLL4) {
3547 enable_reg = MG_PLL_ENABLE(0);
3550 * We need to disable DC states when this DPLL is enabled.
3551 * This can be done by taking a reference on DPLL4 power
3554 pll->wakeref = intel_display_power_get(dev_priv,
3555 POWER_DOMAIN_DPLL_DC_OFF);
3558 icl_pll_power_enable(dev_priv, pll, enable_reg);
3560 icl_dpll_write(dev_priv, pll);
3563 * DVFS pre sequence would be here, but in our driver the cdclk code
3564 * paths should already be setting the appropriate voltage, hence we do
3568 icl_pll_enable(dev_priv, pll, enable_reg);
3570 /* DVFS post sequence would be here. See the comment above. */
3573 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3574 struct intel_shared_dpll *pll)
3576 icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3578 icl_dpll_write(dev_priv, pll);
3581 * DVFS pre sequence would be here, but in our driver the cdclk code
3582 * paths should already be setting the appropriate voltage, hence we do
3586 icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3588 /* DVFS post sequence would be here. See the comment above. */
3591 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3592 struct intel_shared_dpll *pll)
3594 i915_reg_t enable_reg =
3595 MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3597 icl_pll_power_enable(dev_priv, pll, enable_reg);
3599 if (INTEL_GEN(dev_priv) >= 12)
3600 dkl_pll_write(dev_priv, pll);
3602 icl_mg_pll_write(dev_priv, pll);
3605 * DVFS pre sequence would be here, but in our driver the cdclk code
3606 * paths should already be setting the appropriate voltage, hence we do
3610 icl_pll_enable(dev_priv, pll, enable_reg);
3612 /* DVFS post sequence would be here. See the comment above. */
3615 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3616 struct intel_shared_dpll *pll,
3617 i915_reg_t enable_reg)
3621 /* The first steps are done by intel_ddi_post_disable(). */
3624 * DVFS pre sequence would be here, but in our driver the cdclk code
3625 * paths should already be setting the appropriate voltage, hence we do
3629 val = intel_de_read(dev_priv, enable_reg);
3631 intel_de_write(dev_priv, enable_reg, val);
3633 /* Timeout is actually 1us. */
3634 if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
3635 drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
3637 /* DVFS post sequence would be here. See the comment above. */
3639 val = intel_de_read(dev_priv, enable_reg);
3640 val &= ~PLL_POWER_ENABLE;
3641 intel_de_write(dev_priv, enable_reg, val);
3644 * The spec says we need to "wait" but it also says it should be
3647 if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3648 drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
3652 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3653 struct intel_shared_dpll *pll)
3655 i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3657 if (IS_ELKHARTLAKE(dev_priv) &&
3658 pll->info->id == DPLL_ID_EHL_DPLL4) {
3659 enable_reg = MG_PLL_ENABLE(0);
3660 icl_pll_disable(dev_priv, pll, enable_reg);
3662 intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
3667 icl_pll_disable(dev_priv, pll, enable_reg);
3670 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3671 struct intel_shared_dpll *pll)
3673 icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3676 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3677 struct intel_shared_dpll *pll)
3679 i915_reg_t enable_reg =
3680 MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3682 icl_pll_disable(dev_priv, pll, enable_reg);
3685 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3686 const struct intel_dpll_hw_state *hw_state)
3688 drm_dbg_kms(&dev_priv->drm,
3689 "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3690 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3691 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3692 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3693 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3694 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3695 hw_state->cfgcr0, hw_state->cfgcr1,
3696 hw_state->mg_refclkin_ctl,
3697 hw_state->mg_clktop2_coreclkctl1,
3698 hw_state->mg_clktop2_hsclkctl,
3699 hw_state->mg_pll_div0,
3700 hw_state->mg_pll_div1,
3701 hw_state->mg_pll_lf,
3702 hw_state->mg_pll_frac_lock,
3703 hw_state->mg_pll_ssc,
3704 hw_state->mg_pll_bias,
3705 hw_state->mg_pll_tdc_coldst_bias);
/* PLL hooks for the ICL+ combo PHY PLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
};
/* PLL hooks for the Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
};
/* PLL hooks for the ICL MG (Type-C) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
};
/*
 * DPLL table for Icelake. Entry order must match the DPLL id enum values
 * (checked in intel_shared_dpll_init()); the list is NULL-name terminated.
 */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};
/* DPLL manager for Icelake. */
static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.dump_hw_state = icl_dump_hw_state,
};
/* DPLL table for Elkhart Lake; combo PHY PLLs only, NULL-name terminated. */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};
/*
 * DPLL manager for Elkhart Lake. No update_active_dpll hook: EHL has no
 * Type-C ports, so there is no per-port PLL switching to do.
 */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.dump_hw_state = icl_dump_hw_state,
};
/*
 * PLL hooks for the TGL Dekel PHY TC PLLs; enable/disable reuse the MG
 * paths, only the hw state readout differs.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
};
/*
 * DPLL table for Tigerlake. Entry order must match the DPLL id enum values
 * (checked in intel_shared_dpll_init()); the list is NULL-name terminated.
 */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};
/* DPLL manager for Tigerlake. */
static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.dump_hw_state = icl_dump_hw_state,
};
/**
 * intel_shared_dpll_init - Initialize shared DPLLs
 * @dev: drm device
 *
 * Initialize shared DPLLs for @dev.
 */
void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_dpll_mgr *dpll_mgr = NULL;
	const struct dpll_info *dpll_info;
	int i;

	/* Newest platforms first; gen checks must precede platform checks. */
	if (INTEL_GEN(dev_priv) >= 12)
		dpll_mgr = &tgl_pll_mgr;
	else if (IS_ELKHARTLAKE(dev_priv))
		dpll_mgr = &ehl_pll_mgr;
	else if (INTEL_GEN(dev_priv) >= 11)
		dpll_mgr = &icl_pll_mgr;
	else if (IS_CANNONLAKE(dev_priv))
		dpll_mgr = &cnl_pll_mgr;
	else if (IS_GEN9_BC(dev_priv))
		dpll_mgr = &skl_pll_mgr;
	else if (IS_GEN9_LP(dev_priv))
		dpll_mgr = &bxt_pll_mgr;
	else if (HAS_DDI(dev_priv))
		dpll_mgr = &hsw_pll_mgr;
	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		dpll_mgr = &pch_pll_mgr;

	/* Platforms without shared DPLL support simply get zero PLLs. */
	if (!dpll_mgr) {
		dev_priv->num_shared_dpll = 0;
		return;
	}

	dpll_info = dpll_mgr->dpll_info;

	for (i = 0; dpll_info[i].name; i++) {
		/* Table index must match the DPLL id for direct lookup. */
		drm_WARN_ON(dev, i != dpll_info[i].id);
		dev_priv->shared_dplls[i].info = &dpll_info[i];
	}

	dev_priv->dpll_mgr = dpll_mgr;
	dev_priv->num_shared_dpll = i;
	mutex_init(&dev_priv->dpll_lock);

	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}
/**
 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
 * @state: atomic state
 * @crtc: CRTC to reserve DPLLs for
 * @encoder: encoder
 *
 * This function reserves all required DPLLs for the given CRTC and encoder
 * combination in the current atomic commit @state and the new @crtc atomic
 * state.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
 *
 * The reserved DPLLs should be released by calling
 * intel_release_shared_dplls().
 *
 * Returns:
 * True if all required DPLLs were successfully reserved.
 */
bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
				struct intel_crtc *crtc,
				struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;

	/* No manager means this platform has no shared DPLL support. */
	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
		return false;

	return dpll_mgr->get_dplls(state, crtc, encoder);
}
/**
 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
 * @state: atomic state
 * @crtc: crtc from which the DPLLs are to be released
 *
 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
 * from the current atomic commit @state and the old @crtc atomic state.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
 */
void intel_release_shared_dplls(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;

	/*
	 * FIXME: this function is called for every platform having a
	 * compute_clock hook, even though the platform doesn't yet support
	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
	 * called on those.
	 */
	if (!dpll_mgr)
		return;

	dpll_mgr->put_dplls(state, crtc);
}
/**
 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
 * @state: atomic state
 * @crtc: the CRTC for which to update the active DPLL
 * @encoder: encoder determining the type of port DPLL
 *
 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
 * DPLL selected will be based on the current mode of the encoder's port.
 */
void intel_update_active_dpll(struct intel_atomic_state *state,
			      struct intel_crtc *crtc,
			      struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;

	/* Only platforms with Type-C ports provide this hook. */
	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
		return;

	dpll_mgr->update_active_dpll(state, crtc, encoder);
}
 * intel_dpll_dump_hw_state - write hw_state to dmesg
3921 * @dev_priv: i915 drm device
3922 * @hw_state: hw state to be written to the log
3924 * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
3926 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
3927 const struct intel_dpll_hw_state *hw_state)
3929 if (dev_priv->dpll_mgr) {
3930 dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
3932 /* fallback for platforms that don't use the shared dpll
3935 drm_dbg_kms(&dev_priv->drm,
3936 "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
3937 "fp0: 0x%x, fp1: 0x%x\n",