1 // SPDX-License-Identifier: MIT
3 * Copyright © 2020 Intel Corporation
6 #include "intel_atomic.h"
9 #include "intel_display_types.h"
10 #include "intel_fdi.h"
11 #include "intel_sideband.h"
/* Run FDI link training via the platform-specific hook installed at init. */
void intel_fdi_link_train(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state)
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Hook set up by intel_fdi_init_hook() based on platform. */
	dev_priv->display.fdi_link_train(crtc, crtc_state);
/*
 * Return the number of FDI lanes this pipe needs: its configured lane
 * count when enabled with a PCH encoder, otherwise no FDI link is used.
 */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;
/*
 * Validate the requested FDI lane count for @pipe against per-platform
 * limits and against lanes already claimed by the pipes that share the
 * FDI link (IVB three-pipe case). Returns 0 on success or a negative
 * error code (including errors from acquiring the other pipe's state).
 */
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	/* Absolute hardware maximum is 4 lanes. */
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);

	/* HSW/BDW FDI (used for the CRT port) is limited to 2 lanes. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);

	/* Two-pipe platforms have no FDI sharing constraints to check. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)

	/* Ivybridge 3 pipe is really complicated */

	/* 2 lanes or fewer never conflict with the shared B/C link. */
	if (pipe_config->fdi_lanes <= 2)

	/*
	 * More than 2 lanes on this pipe: pipe C must not be using any
	 * FDI lanes at all.
	 */
	other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		intel_atomic_get_crtc_state(state, other_crtc);
	if (IS_ERR(other_crtc_state))
		return PTR_ERR(other_crtc_state);

	if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid shared fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);

	/* Pipe C itself can never use more than 2 lanes. */
	if (pipe_config->fdi_lanes > 2) {
		drm_dbg_kms(&dev_priv->drm,
			    "only 2 lanes on pipe %c: required %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);

	/*
	 * Pipe C in use: pipe B may use at most 2 lanes, otherwise link C
	 * cannot be enabled.
	 */
	other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		intel_atomic_get_crtc_state(state, other_crtc);
	if (IS_ERR(other_crtc_state))
		return PTR_ERR(other_crtc_state);

	if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
		drm_dbg_kms(&dev_priv->drm,
			    "fdi link B uses too many lanes to enable link C\n");
/*
 * Cache the FDI PLL frequency (in kHz) in i915->fdi_pll_freq: read it
 * from the BIOS-programmed divider on Ironlake, hardcode 270 MHz on
 * SNB/IVB.
 */
void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
	if (IS_IRONLAKE(i915)) {
			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		/* Divider is stored relative to a 100 MHz reference. */
		i915->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
		i915->fdi_pll_freq = 270000;

	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->fdi_pll_freq);
/*
 * FDI link frequency for this config: the port clock (SPLL-driven) on
 * platforms where that applies, otherwise the cached FDI PLL frequency.
 */
int intel_fdi_link_freq(struct drm_i915_private *i915,
			const struct intel_crtc_state *pipe_config)
		return pipe_config->port_clock; /* SPLL */
		return i915->fdi_pll_freq;
/*
 * Compute the FDI lane count and M/N values for @pipe_config, then
 * validate lane sharing. If validation fails with -EINVAL and the pipe
 * bpp can still be lowered, reduce bpp and request a recompute
 * (I915_DISPLAY_CONFIG_RETRY) instead of failing outright.
 */
int ilk_fdi_compute_config(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config);

	/* Drop bpp in 2-bit-per-channel steps, but never below 6 bpc. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		return I915_DISPLAY_CONFIG_RETRY;
/*
 * Set or clear the FDI B/C lane bifurcation chicken bit. No-op if the
 * bit already matches @enable; warns if either FDI B or C receiver is
 * still enabled while flipping the bit.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)

	/* Must not re-route lanes while the receivers are running. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &

	temp &= ~FDI_BC_BIFURCATION_SELECT;
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	/* Posting read flushes the write before callers proceed. */
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
/*
 * Choose the B/C bifurcation setting for this crtc: pipe B needs the
 * lanes un-bifurcated only when it uses more than 2 of them; pipe C
 * always requires bifurcation.
 */
static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	switch (crtc->pipe) {
		if (crtc_state->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev_priv, false);
			cpt_set_fdi_bc_bifurcation(dev_priv, true);

		cpt_set_fdi_bc_bifurcation(dev_priv, true);

		MISSING_CASE(crtc->pipe);
/*
 * Switch the FDI TX/RX out of the training patterns into normal pixel
 * transmission, enabling enhanced framing on both sides.
 */
void intel_fdi_normal_train(struct intel_crtc *crtc)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/* IVB uses its own link-train field layout. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT PCH has a dedicated pattern field for the receiver. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_write(dev_priv, reg,
			       intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Two-stage training: enable TX/RX with pattern 1 and poll FDI_RX_IIR
 * for bit lock, then switch both sides to pattern 2 and poll for symbol
 * lock. Failures are logged with drm_err() but not propagated.
 */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* FDI needs bits from pipe first */
	assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	 */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock; sticky bit is cleared by writing it back. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);

		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2: switch both TX and RX to pattern 2. */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);

	/* Poll for symbol lock, again clearing the sticky status bit. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");

		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
/*
 * Voltage-swing / pre-emphasis values tried in order during SNB/IVB
 * FDI link training (written into FDI_TX_CTL's vswing/emphasis field).
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Like the ILK version, but each training pattern is retried across the
 * snb_b_fdi_train_param voltage/emphasis table, polling FDI_RX_IIR for
 * bit lock (pattern 1) and symbol lock (pattern 2) at each setting.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	 */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* Start with the lowest voltage/emphasis entry. */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);

	/* Sweep the vswing/emphasis table looking for bit lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* Clear the sticky status bit by writing it. */
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");

		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_SANDYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B reset to lowest vswing before the pattern 2 sweep. */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);

	/* Same sweep again, now waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");

		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
/* Manual link training for Ivy Bridge A0 parts */
/*
 * IVB A0 cannot use hardware auto-training, so each vswing/emphasis
 * entry is tried twice (j/2 indexes the table): disable TX/RX, re-enable
 * with pattern 1 and poll for bit lock, then switch to pattern 2 and
 * poll for symbol lock. B/C lane bifurcation is configured up front.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	ivb_update_fdi_bc_bifurcation(crtc_state);

	/*
	 * Write the TU size bits before fdi link training, so that error
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	 */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each table entry gets two attempts. */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			/* Double-read: lock may latch between polls. */
			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",

			udelay(1); /* should be 0.5us */

			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",

			udelay(2); /* should be 1.5us */

			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
/* Starting with Haswell, different DDI ports can work in FDI mode for
 * connection to the PCH-located connectors. For this, it is necessary to train
 * both the DDI port and PCH receiver for the desired DDI buffer settings.
 *
 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
 * please note that when FDI mode is active on DDI E, it shares 2 lines with
 * DDI A (which is used for eDP)
 */
/*
 * HSW auto-training: enable the PCH FDI receiver PLL, then iterate over
 * the DDI buffer translation entries (each tried twice), using the
 * DP_TP_CTL FDI auto-train mode and checking DP_TP_STATUS for
 * completion. On failure of an iteration everything is torn down and
 * retried with the next buffer setting; on final failure the link is
 * left enabled to keep the state checker quiet.
 */
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 temp, i, rx_ctl_val;

	/* n_entries: size of the DDI buffer translation table. */
	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	/* FDI on HSW must be driven by the SPLL. */
	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
			       DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		/* Program PCH FDI Receiver TU */
		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */

		/* Unset FDI_RX_MISC pwrdn lanes */
		temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
		intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */

		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI link training done on step %d\n", i);

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(&dev_priv->drm, "FDI link training failed!\n");

		/* Tear down this attempt before retrying. */
		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
		temp &= ~DDI_BUF_CTL_ENABLE;
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
		temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
		temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(dev_priv, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
		temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
		intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));

	/* Enable normal pixel sending for FDI */
	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
/*
 * Enable the PCH FDI RX PLL (with lane count and BPC matching PIPECONF),
 * switch the receiver to PCDclk, then enable the CPU FDI TX PLL if it
 * was not already running.
 */
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* Copy BPC from PIPECONF into the FDI RX bpc field (bits 16-18). */
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);

	/* Switch from Rawclk to PCDclk */
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);

	intel_de_posting_read(dev_priv, reg);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
/*
 * Reverse of ilk_fdi_pll_enable(): switch the receiver back to Rawclk,
 * then disable the CPU FDI TX PLL and the PCH FDI RX PLL.
 */
void ilk_fdi_pll_disable(struct intel_crtc *crtc)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	intel_de_posting_read(dev_priv, reg);
/*
 * Disable the CPU FDI transmitter and PCH FDI receiver for @crtc,
 * applying the IBX clock-pointer workaround and leaving both sides
 * parked in training pattern 1 with BPC kept consistent with PIPECONF.
 */
void ilk_fdi_disable(struct intel_crtc *crtc)
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	/* Refresh the bpc field (bits 16-18) from PIPECONF while disabling. */
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
/*
 * Pulse the FDI mPHY IOSFSB reset: assert the reset control bit, wait
 * (up to 100us) for the status to report assertion, then de-assert and
 * wait for the status to clear. Timeouts are logged but not fatal.
 */
static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers over the sideband (SBI_MPHY)
 * interface after resetting the mPHY. The register offsets and values
 * come from the WaMPhyProgramming:hsw workaround; most are programmed
 * in pairs (0x2xxx / 0x21xx) for the two FDI channels.
 */
void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
	lpt_fdi_reset_mphy(dev_priv);

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
1017 intel_fdi_init_hook(struct drm_i915_private *dev_priv)
1019 if (IS_IRONLAKE(dev_priv)) {
1020 dev_priv->display.fdi_link_train = ilk_fdi_link_train;
1021 } else if (IS_SANDYBRIDGE(dev_priv)) {
1022 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
1023 } else if (IS_IVYBRIDGE(dev_priv)) {
1024 /* FIXME: detect B0+ stepping and use auto training */
1025 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;