drm/i915/dp: localize link rate index variable more
[sfrench/cifs-2.6.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/types.h>
32 #include <linux/notifier.h>
33 #include <linux/reboot.h>
34 #include <asm/byteorder.h>
35 #include <drm/drmP.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_crtc.h>
38 #include <drm/drm_crtc_helper.h>
39 #include <drm/drm_edid.h>
40 #include "intel_drv.h"
41 #include <drm/i915_drm.h>
42 #include "i915_drv.h"
43
44 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
45
46 /* Compliance test status bits  */
47 #define INTEL_DP_RESOLUTION_SHIFT_MASK  0
48 #define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49 #define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
50 #define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
51
/* Pairs a DP link clock with the DPLL divider values that produce it. */
struct dp_link_dpll {
	int clock;		/* link clock in kHz (e.g. 162000, 270000) */
	struct dpll dpll;	/* divider settings programmed into the DPLL */
};
56
/* Gen4 DPLL dividers for the standard DP link clocks (kHz). */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
63
/* PCH DPLL dividers for the standard DP link clocks (kHz). */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
70
/* VLV DPLL dividers for the standard DP link clocks (kHz). */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
77
/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below only provides the fixed rates but excludes variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
95
/* Source link rates (kHz) supported by gen9 LP (BXT-class) platforms. */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
/* Source link rates (kHz) supported by gen9 BC (SKL-class) platforms. */
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
/* Standard DP link rates (kHz) used when no platform-specific table applies. */
static const int default_rates[] = { 162000, 270000, 540000 };
101
102 /**
103  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
104  * @intel_dp: DP struct
105  *
106  * If a CPU or PCH DP output is attached to an eDP panel, this function
107  * will return true, and false otherwise.
108  */
109 static bool is_edp(struct intel_dp *intel_dp)
110 {
111         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112
113         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
114 }
115
116 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
117 {
118         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
119
120         return intel_dig_port->base.base.dev;
121 }
122
123 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
124 {
125         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
126 }
127
128 static void intel_dp_link_down(struct intel_dp *intel_dp);
129 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
130 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
131 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
132 static void vlv_steal_power_sequencer(struct drm_device *dev,
133                                       enum pipe pipe);
134 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
135
136 static int intel_dp_num_rates(u8 link_bw_code)
137 {
138         switch (link_bw_code) {
139         default:
140                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
141                      link_bw_code);
142         case DP_LINK_BW_1_62:
143                 return 1;
144         case DP_LINK_BW_2_7:
145                 return 2;
146         case DP_LINK_BW_5_4:
147                 return 3;
148         }
149 }
150
151 /* update sink rates from dpcd */
152 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
153 {
154         int i, num_rates;
155
156         num_rates = intel_dp_num_rates(intel_dp->dpcd[DP_MAX_LINK_RATE]);
157
158         for (i = 0; i < num_rates; i++)
159                 intel_dp->sink_rates[i] = default_rates[i];
160
161         intel_dp->num_sink_rates = num_rates;
162 }
163
/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	/* common_rates[] is sorted ascending, so the max is the last entry. */
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}
169
170 /* Theoretical max between source and sink */
171 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
172 {
173         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
174         int source_max = intel_dig_port->max_lanes;
175         int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
176
177         return min(source_max, sink_max);
178 }
179
/* Current max lane count, possibly reduced by link training fallback. */
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}
184
/*
 * Bandwidth (in kB/s units) required to drive @pixel_clock at @bpp.
 *
 * pixel_clock is in kHz; dividing bpp by 8 converts bits to bytes,
 * rounding up so a fractional byte still counts.
 */
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 7) / 8;
}
191
/*
 * Maximum data rate achievable over the link.
 *
 * max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
 * link rate that is generally expressed in Gbps. Since 8 bits of data
 * are transmitted every LS_Clk per lane, there is no need to account for
 * the channel encoding that is done in the PHY layer here.
 */
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int total = max_link_clock * max_lanes;

	return total;
}
203
204 static int
205 intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
206 {
207         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
208         struct intel_encoder *encoder = &intel_dig_port->base;
209         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
210         int max_dotclk = dev_priv->max_dotclk_freq;
211         int ds_max_dotclk;
212
213         int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
214
215         if (type != DP_DS_PORT_TYPE_VGA)
216                 return max_dotclk;
217
218         ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
219                                                     intel_dp->downstream_ports);
220
221         if (ds_max_dotclk != 0)
222                 max_dotclk = min(max_dotclk, ds_max_dotclk);
223
224         return max_dotclk;
225 }
226
227 static void
228 intel_dp_set_source_rates(struct intel_dp *intel_dp)
229 {
230         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
231         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
232         const int *source_rates;
233         int size;
234
235         /* This should only be done once */
236         WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
237
238         if (IS_GEN9_LP(dev_priv)) {
239                 source_rates = bxt_rates;
240                 size = ARRAY_SIZE(bxt_rates);
241         } else if (IS_GEN9_BC(dev_priv)) {
242                 source_rates = skl_rates;
243                 size = ARRAY_SIZE(skl_rates);
244         } else {
245                 source_rates = default_rates;
246                 size = ARRAY_SIZE(default_rates);
247         }
248
249         /* This depends on the fact that 5.4 is last value in the array */
250         if (!intel_dp_source_supports_hbr2(intel_dp))
251                 size--;
252
253         intel_dp->source_rates = source_rates;
254         intel_dp->num_source_rates = size;
255 }
256
257 static int intersect_rates(const int *source_rates, int source_len,
258                            const int *sink_rates, int sink_len,
259                            int *common_rates)
260 {
261         int i = 0, j = 0, k = 0;
262
263         while (i < source_len && j < sink_len) {
264                 if (source_rates[i] == sink_rates[j]) {
265                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
266                                 return k;
267                         common_rates[k] = source_rates[i];
268                         ++k;
269                         ++i;
270                         ++j;
271                 } else if (source_rates[i] < sink_rates[j]) {
272                         ++i;
273                 } else {
274                         ++j;
275                 }
276         }
277         return k;
278 }
279
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int idx;

	for (idx = 0; idx < len; idx++) {
		if (rates[idx] == rate)
			return idx;
	}

	return -1;
}
291
292 static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
293 {
294         WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);
295
296         intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
297                                                      intel_dp->num_source_rates,
298                                                      intel_dp->sink_rates,
299                                                      intel_dp->num_sink_rates,
300                                                      intel_dp->common_rates);
301
302         /* Paranoia, there should always be something in common. */
303         if (WARN_ON(intel_dp->num_common_rates == 0)) {
304                 intel_dp->common_rates[0] = default_rates[0];
305                 intel_dp->num_common_rates = 1;
306         }
307 }
308
309 /* get length of common rates potentially limited by max_rate */
310 static int intel_dp_common_len_rate_limit(struct intel_dp *intel_dp,
311                                           int max_rate)
312 {
313         const int *common_rates = intel_dp->common_rates;
314         int i, common_len = intel_dp->num_common_rates;
315
316         /* Limit results by potentially reduced max rate */
317         for (i = 0; i < common_len; i++) {
318                 if (common_rates[common_len - i - 1] <= max_rate)
319                         return common_len - i;
320         }
321
322         return 0;
323 }
324
325 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
326                                             int link_rate, uint8_t lane_count)
327 {
328         int index;
329
330         index = intel_dp_rate_index(intel_dp->common_rates,
331                                     intel_dp->num_common_rates,
332                                     link_rate);
333         if (index > 0) {
334                 intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
335                 intel_dp->max_link_lane_count = lane_count;
336         } else if (lane_count > 1) {
337                 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
338                 intel_dp->max_link_lane_count = lane_count >> 1;
339         } else {
340                 DRM_ERROR("Link Training Unsuccessful\n");
341                 return -1;
342         }
343
344         return 0;
345 }
346
/*
 * Validate a display mode against panel, link bandwidth, dotclock and
 * hardware limits for this DP connector.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	/* eDP panels can't exceed their fixed native mode's dimensions. */
	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		/* eDP always drives the panel at its native clock. */
		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* 18 bpp presumably = minimum (6 bpc) pipe bpp — TODO confirm */
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate || target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	/* 10 MHz appears to be the minimum supported dotclock here. */
	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
387
/*
 * Pack up to 4 bytes of an AUX message into a big-endian 32-bit register
 * value (byte 0 in the most significant position). Inputs longer than 4
 * bytes are truncated.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;

	for (i = 0; i < src_bytes; i++)
		v |= (uint32_t)src[i] << (24 - i * 8);

	return v;
}
399
/*
 * Unpack a big-endian 32-bit AUX register value into up to 4 bytes
 * (most significant byte first). Requests beyond 4 bytes are clamped.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;

	for (i = 0; i < dst_bytes; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
408
409 static void
410 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
411                                     struct intel_dp *intel_dp);
412 static void
413 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
414                                               struct intel_dp *intel_dp,
415                                               bool force_disable_vdd);
416 static void
417 intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
418
/*
 * Acquire the panel power sequencer mutex, taking the AUX power domain
 * reference first. The power domain get MUST happen before mutex_lock()
 * (see the comment in intel_power_sequencer_reset() about the lock
 * ordering between pps_mutex and the power_domain mutex).
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
434
/*
 * Release the panel power sequencer mutex and drop the AUX power domain
 * reference taken in pps_lock(), in the reverse order of acquisition.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	mutex_unlock(&dev_priv->pps_mutex);

	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
446
/*
 * Force the VLV/CHV panel power sequencer to lock onto this port by
 * briefly enabling and disabling the port with a minimal configuration.
 * Requires the pipe's DPLL; if it isn't running, it is temporarily
 * forced on (and any needed CHV PHY common lane override taken).
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	/* Kicking an actively driven port would glitch it; bail instead. */
	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So temporarily enable it if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		/*
		 * NOTE(review): on this error path a taken cl_override is
		 * never released via chv_phy_powergate_ch(..., false) —
		 * looks like a potential imbalance; verify intent.
		 */
		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	/* Undo the temporary PLL (and CHV lane override) if we enabled it. */
	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
519
520 static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
521 {
522         struct intel_encoder *encoder;
523         unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
524
525         /*
526          * We don't have power sequencer currently.
527          * Pick one that's not used by other ports.
528          */
529         for_each_intel_encoder(&dev_priv->drm, encoder) {
530                 struct intel_dp *intel_dp;
531
532                 if (encoder->type != INTEL_OUTPUT_DP &&
533                     encoder->type != INTEL_OUTPUT_EDP)
534                         continue;
535
536                 intel_dp = enc_to_intel_dp(&encoder->base);
537
538                 if (encoder->type == INTEL_OUTPUT_EDP) {
539                         WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
540                                 intel_dp->active_pipe != intel_dp->pps_pipe);
541
542                         if (intel_dp->pps_pipe != INVALID_PIPE)
543                                 pipes &= ~(1 << intel_dp->pps_pipe);
544                 } else {
545                         WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);
546
547                         if (intel_dp->active_pipe != INVALID_PIPE)
548                                 pipes &= ~(1 << intel_dp->active_pipe);
549                 }
550         }
551
552         if (pipes == 0)
553                 return INVALID_PIPE;
554
555         return ffs(pipes) - 1;
556 }
557
/*
 * Return the pipe whose power sequencer drives this eDP port, picking,
 * stealing and initializing one if the port doesn't have one yet.
 * Must be called with pps_mutex held.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

	/* Already have one? Keep using it. */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
605
/*
 * Return the BXT power sequencer instance for this eDP port (always 0 for
 * now), reprogramming its registers if a reset was flagged.
 * Must be called with pps_mutex held.
 */
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	/*
	 * TODO: BXT has 2 PPS instances. The correct port->PPS instance
	 * mapping needs to be retrieved from VBT, for now just hard-code to
	 * use instance #0 always.
	 */
	if (!intel_dp->pps_reset)
		return 0;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);

	return 0;
}
636
637 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
638                                enum pipe pipe);
639
/* vlv_pipe_check callback: pipe's panel power is currently on. */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}
645
/* vlv_pipe_check callback: pipe has VDD force enabled. */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}
651
/* vlv_pipe_check callback: accept any pipe (last-resort fallback). */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
657
658 static enum pipe
659 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
660                      enum port port,
661                      vlv_pipe_check pipe_check)
662 {
663         enum pipe pipe;
664
665         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
666                 u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
667                         PANEL_PORT_SELECT_MASK;
668
669                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
670                         continue;
671
672                 if (!pipe_check(dev_priv, pipe))
673                         continue;
674
675                 return pipe;
676         }
677
678         return INVALID_PIPE;
679 }
680
/*
 * At init time, figure out which pipe's power sequencer (if any) the BIOS
 * left driving this port, trying progressively weaker criteria, and
 * initialize the PPS state for it. Must be called with pps_mutex held.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
}
717
/*
 * Invalidate the cached power sequencer state of every eDP port, so it is
 * re-established on next use (after e.g. a power well reset). On GEN9 LP
 * only the HW registers need reprogramming; on VLV/CHV the pipe binding
 * itself is forgotten.
 */
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_GEN9_LP(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DP &&
		    encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		/* Only eDP ports carry PPS state. */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}
757
/* Register offsets of one panel power sequencer instance. */
struct pps_registers {
	i915_reg_t pp_ctrl;	/* panel power control */
	i915_reg_t pp_stat;	/* panel power status */
	i915_reg_t pp_on;	/* power-on delays */
	i915_reg_t pp_off;	/* power-off delays */
	i915_reg_t pp_div;	/* divisor/cycle delay (absent on GEN9 LP) */
};
765
/*
 * Fill @regs with the PPS register offsets for this port's sequencer
 * instance (BXT index, VLV/CHV pipe, or fixed instance 0 elsewhere).
 */
static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
				    struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);
	/* GEN9 LP has no divisor register. */
	if (!IS_GEN9_LP(dev_priv))
		regs->pp_div = PP_DIVISOR(pps_idx);
}
786
787 static i915_reg_t
788 _pp_ctrl_reg(struct intel_dp *intel_dp)
789 {
790         struct pps_registers regs;
791
792         intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
793                                 &regs);
794
795         return regs.pp_ctrl;
796 }
797
798 static i915_reg_t
799 _pp_stat_reg(struct intel_dp *intel_dp)
800 {
801         struct pps_registers regs;
802
803         intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
804                                 &regs);
805
806         return regs.pp_stat;
807 }
808
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing.
   Only applicable when the panel PM state is not being tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Only act on eDP panels and only for restarts. */
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = PP_CONTROL(pipe);
		pp_div_reg  = PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		/* Wait out the power cycle delay so T12 is honored. */
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
844
845 static bool edp_have_panel_power(struct intel_dp *intel_dp)
846 {
847         struct drm_device *dev = intel_dp_to_dev(intel_dp);
848         struct drm_i915_private *dev_priv = to_i915(dev);
849
850         lockdep_assert_held(&dev_priv->pps_mutex);
851
852         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
853             intel_dp->pps_pipe == INVALID_PIPE)
854                 return false;
855
856         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
857 }
858
859 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
860 {
861         struct drm_device *dev = intel_dp_to_dev(intel_dp);
862         struct drm_i915_private *dev_priv = to_i915(dev);
863
864         lockdep_assert_held(&dev_priv->pps_mutex);
865
866         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
867             intel_dp->pps_pipe == INVALID_PIPE)
868                 return false;
869
870         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
871 }
872
/*
 * Sanity check before AUX transfers on eDP: warn if neither panel power
 * nor VDD is on, since AUX won't work without one of them.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	if (!is_edp(intel_dp))
		return;

	if (edp_have_panel_power(intel_dp) || edp_have_panel_vdd(intel_dp))
		return;

	WARN(1, "eDP powered off while attempting aux channel communication.\n");
	DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
		      I915_READ(_pp_stat_reg(intel_dp)),
		      I915_READ(_pp_ctrl_reg(intel_dp)));
}
889
/*
 * Wait for the pending AUX transaction to complete, i.e. for the SEND_BUSY
 * bit in the AUX channel control register to clear. Uses the AUX done
 * interrupt when available, otherwise polls. Returns the final register
 * value so the caller can inspect the done/error status bits.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* "done" condition: SEND_BUSY has cleared; also latches status for return */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
913
914 static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
915 {
916         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
917         struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
918
919         if (index)
920                 return 0;
921
922         /*
923          * The clock divider is based off the hrawclk, and would like to run at
924          * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
925          */
926         return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
927 }
928
929 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
930 {
931         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
932         struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
933
934         if (index)
935                 return 0;
936
937         /*
938          * The clock divider is based off the cdclk or PCH rawclk, and would
939          * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
940          * divide by 2000 and use that
941          */
942         if (intel_dig_port->port == PORT_A)
943                 return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
944         else
945                 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
946 }
947
948 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
949 {
950         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
951         struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
952
953         if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
954                 /* Workaround for non-ULT HSW */
955                 switch (index) {
956                 case 0: return 63;
957                 case 1: return 72;
958                 default: return 0;
959                 }
960         }
961
962         return ilk_get_aux_clock_divider(intel_dp, index);
963 }
964
965 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
966 {
967         /*
968          * SKL doesn't need us to program the AUX clock divider (Hardware will
969          * derive the clock from CDCLK automatically). We still implement the
970          * get_aux_clock_divider vfunc to plug-in into the existing code.
971          */
972         return index ? 0 : 1;
973 }
974
975 static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
976                                      bool has_aux_irq,
977                                      int send_bytes,
978                                      uint32_t aux_clock_divider)
979 {
980         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
981         struct drm_i915_private *dev_priv =
982                         to_i915(intel_dig_port->base.base.dev);
983         uint32_t precharge, timeout;
984
985         if (IS_GEN6(dev_priv))
986                 precharge = 3;
987         else
988                 precharge = 5;
989
990         if (IS_BROADWELL(dev_priv) && intel_dig_port->port == PORT_A)
991                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
992         else
993                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
994
995         return DP_AUX_CH_CTL_SEND_BUSY |
996                DP_AUX_CH_CTL_DONE |
997                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
998                DP_AUX_CH_CTL_TIME_OUT_ERROR |
999                timeout |
1000                DP_AUX_CH_CTL_RECEIVE_ERROR |
1001                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1002                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1003                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
1004 }
1005
1006 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1007                                       bool has_aux_irq,
1008                                       int send_bytes,
1009                                       uint32_t unused)
1010 {
1011         return DP_AUX_CH_CTL_SEND_BUSY |
1012                DP_AUX_CH_CTL_DONE |
1013                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
1014                DP_AUX_CH_CTL_TIME_OUT_ERROR |
1015                DP_AUX_CH_CTL_TIME_OUT_1600us |
1016                DP_AUX_CH_CTL_RECEIVE_ERROR |
1017                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1018                DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
1019                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
1020 }
1021
/*
 * Perform one raw AUX channel transaction: write the request bytes to the
 * data registers, kick off the send, wait for completion and unload any
 * reply bytes into @recv. Returns the number of bytes received, or a
 * negative errno (-EBUSY, -E2BIG, -EIO, -ETIMEDOUT).
 *
 * Runs with the PPS mutex held and VDD forced on for eDP, and bumps the
 * PM QoS request because AUX is very sensitive to IRQ latency.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Still busy; warn once per distinct status value only */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry across all available clock dividers until the send succeeds */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		/*
		 * FIXME: This patch was created on top of a series that
		 * organize the retries at drm level. There EBUSY should
		 * also take care for 1ms wait before retrying.
		 * That aux retries re-org is still needed and after that is
		 * merged we remove this sleep from here.
		 */
		usleep_range(1000, 1500);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Drop the latency constraint, VDD reference and PPS lock again */
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
1193
#define BARE_ADDRESS_SIZE       3
#define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux transfer hook: marshal a drm_dp_aux_msg into the raw 4-byte
 * AUX header (+payload for writes), run it through intel_dp_aux_ch() and
 * decode the reply. Returns the payload size on success or a negative
 * errno; the sink's reply code is stored in msg->reply.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* AUX header: request type, 20-bit address, length minus one */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* Zero-size messages send the bare 3-byte address only */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		/* A buffer without a size (or vice versa) is a caller bug */
		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		/* Reply is one status byte followed by the payload */
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1268
1269 static enum port intel_aux_port(struct drm_i915_private *dev_priv,
1270                                 enum port port)
1271 {
1272         const struct ddi_vbt_port_info *info =
1273                 &dev_priv->vbt.ddi_port_info[port];
1274         enum port aux_port;
1275
1276         if (!info->alternate_aux_channel) {
1277                 DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
1278                               port_name(port), port_name(port));
1279                 return port;
1280         }
1281
1282         switch (info->alternate_aux_channel) {
1283         case DP_AUX_A:
1284                 aux_port = PORT_A;
1285                 break;
1286         case DP_AUX_B:
1287                 aux_port = PORT_B;
1288                 break;
1289         case DP_AUX_C:
1290                 aux_port = PORT_C;
1291                 break;
1292         case DP_AUX_D:
1293                 aux_port = PORT_D;
1294                 break;
1295         default:
1296                 MISSING_CASE(info->alternate_aux_channel);
1297                 aux_port = PORT_A;
1298                 break;
1299         }
1300
1301         DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
1302                       port_name(aux_port), port_name(port));
1303
1304         return aux_port;
1305 }
1306
1307 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1308                                   enum port port)
1309 {
1310         switch (port) {
1311         case PORT_B:
1312         case PORT_C:
1313         case PORT_D:
1314                 return DP_AUX_CH_CTL(port);
1315         default:
1316                 MISSING_CASE(port);
1317                 return DP_AUX_CH_CTL(PORT_B);
1318         }
1319 }
1320
1321 static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1322                                    enum port port, int index)
1323 {
1324         switch (port) {
1325         case PORT_B:
1326         case PORT_C:
1327         case PORT_D:
1328                 return DP_AUX_CH_DATA(port, index);
1329         default:
1330                 MISSING_CASE(port);
1331                 return DP_AUX_CH_DATA(PORT_B, index);
1332         }
1333 }
1334
1335 static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1336                                   enum port port)
1337 {
1338         switch (port) {
1339         case PORT_A:
1340                 return DP_AUX_CH_CTL(port);
1341         case PORT_B:
1342         case PORT_C:
1343         case PORT_D:
1344                 return PCH_DP_AUX_CH_CTL(port);
1345         default:
1346                 MISSING_CASE(port);
1347                 return DP_AUX_CH_CTL(PORT_A);
1348         }
1349 }
1350
1351 static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1352                                    enum port port, int index)
1353 {
1354         switch (port) {
1355         case PORT_A:
1356                 return DP_AUX_CH_DATA(port, index);
1357         case PORT_B:
1358         case PORT_C:
1359         case PORT_D:
1360                 return PCH_DP_AUX_CH_DATA(port, index);
1361         default:
1362                 MISSING_CASE(port);
1363                 return DP_AUX_CH_DATA(PORT_A, index);
1364         }
1365 }
1366
1367 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1368                                   enum port port)
1369 {
1370         switch (port) {
1371         case PORT_A:
1372         case PORT_B:
1373         case PORT_C:
1374         case PORT_D:
1375                 return DP_AUX_CH_CTL(port);
1376         default:
1377                 MISSING_CASE(port);
1378                 return DP_AUX_CH_CTL(PORT_A);
1379         }
1380 }
1381
1382 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1383                                    enum port port, int index)
1384 {
1385         switch (port) {
1386         case PORT_A:
1387         case PORT_B:
1388         case PORT_C:
1389         case PORT_D:
1390                 return DP_AUX_CH_DATA(port, index);
1391         default:
1392                 MISSING_CASE(port);
1393                 return DP_AUX_CH_DATA(PORT_A, index);
1394         }
1395 }
1396
1397 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1398                                     enum port port)
1399 {
1400         if (INTEL_INFO(dev_priv)->gen >= 9)
1401                 return skl_aux_ctl_reg(dev_priv, port);
1402         else if (HAS_PCH_SPLIT(dev_priv))
1403                 return ilk_aux_ctl_reg(dev_priv, port);
1404         else
1405                 return g4x_aux_ctl_reg(dev_priv, port);
1406 }
1407
1408 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1409                                      enum port port, int index)
1410 {
1411         if (INTEL_INFO(dev_priv)->gen >= 9)
1412                 return skl_aux_data_reg(dev_priv, port, index);
1413         else if (HAS_PCH_SPLIT(dev_priv))
1414                 return ilk_aux_data_reg(dev_priv, port, index);
1415         else
1416                 return g4x_aux_data_reg(dev_priv, port, index);
1417 }
1418
1419 static void intel_aux_reg_init(struct intel_dp *intel_dp)
1420 {
1421         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1422         enum port port = intel_aux_port(dev_priv,
1423                                         dp_to_dig_port(intel_dp)->port);
1424         int i;
1425
1426         intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1427         for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1428                 intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1429 }
1430
/* Release resources allocated by intel_dp_aux_init() (the AUX name). */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}
1436
/*
 * Set up the drm_dp_aux channel for this port: cache the register
 * offsets, initialize the helper and install our transfer hook.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;

	intel_aux_reg_init(intel_dp);
	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}
1450
1451 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1452 {
1453         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1454         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
1455
1456         if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
1457             IS_BROADWELL(dev_priv) || (INTEL_GEN(dev_priv) >= 9))
1458                 return true;
1459         else
1460                 return false;
1461 }
1462
1463 static void
1464 intel_dp_set_clock(struct intel_encoder *encoder,
1465                    struct intel_crtc_state *pipe_config)
1466 {
1467         struct drm_device *dev = encoder->base.dev;
1468         struct drm_i915_private *dev_priv = to_i915(dev);
1469         const struct dp_link_dpll *divisor = NULL;
1470         int i, count = 0;
1471
1472         if (IS_G4X(dev_priv)) {
1473                 divisor = gen4_dpll;
1474                 count = ARRAY_SIZE(gen4_dpll);
1475         } else if (HAS_PCH_SPLIT(dev_priv)) {
1476                 divisor = pch_dpll;
1477                 count = ARRAY_SIZE(pch_dpll);
1478         } else if (IS_CHERRYVIEW(dev_priv)) {
1479                 divisor = chv_dpll;
1480                 count = ARRAY_SIZE(chv_dpll);
1481         } else if (IS_VALLEYVIEW(dev_priv)) {
1482                 divisor = vlv_dpll;
1483                 count = ARRAY_SIZE(vlv_dpll);
1484         }
1485
1486         if (divisor && count) {
1487                 for (i = 0; i < count; i++) {
1488                         if (pipe_config->port_clock == divisor[i].clock) {
1489                                 pipe_config->dpll = divisor[i].dpll;
1490                                 pipe_config->clock_set = true;
1491                                 break;
1492                         }
1493                 }
1494         }
1495 }
1496
/*
 * Format @nelem integers from @array into @str as a comma-separated list,
 * stopping (with the output NUL-terminated) once the buffer would
 * overflow. The truncation check is done explicitly on a non-negative
 * return to avoid the implicit signed/unsigned comparison of
 * "r >= len" (int vs size_t), and to bail out cleanly should snprintf
 * ever report an error.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1512
/*
 * Debug-log the source, sink and common link rate arrays. Skipped
 * entirely unless KMS debugging is enabled, since formatting the
 * arrays is not free.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1532
1533 bool
1534 __intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
1535 {
1536         u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
1537                                                       DP_SINK_OUI;
1538
1539         return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
1540                sizeof(*desc);
1541 }
1542
/*
 * Read and debug-log the device descriptor for the attached branch or
 * sink device. Returns false if the descriptor could not be read.
 */
bool intel_dp_read_desc(struct intel_dp *intel_dp)
{
	struct intel_dp_desc *desc = &intel_dp->desc;
	/* whether the device claims OUI support in DPCD */
	bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
		       DP_OUI_SUPPORT;
	int dev_id_len;

	if (!__intel_dp_read_desc(intel_dp, desc))
		return false;

	/* device_id may not be NUL-terminated; compute its bounded length */
	dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
	DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
		      drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
		      (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
		      dev_id_len, desc->device_id,
		      desc->hw_rev >> 4, desc->hw_rev & 0xf,
		      desc->sw_major_rev, desc->sw_minor_rev);

	return true;
}
1563
1564 int
1565 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1566 {
1567         int len;
1568
1569         len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1570         if (WARN_ON(len <= 0))
1571                 return 162000;
1572
1573         return intel_dp->common_rates[len - 1];
1574 }
1575
1576 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1577 {
1578         int i = intel_dp_rate_index(intel_dp->sink_rates,
1579                                     intel_dp->num_sink_rates, rate);
1580
1581         if (WARN_ON(i < 0))
1582                 i = 0;
1583
1584         return i;
1585 }
1586
1587 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1588                            uint8_t *link_bw, uint8_t *rate_select)
1589 {
1590         /* eDP 1.4 rate select method. */
1591         if (intel_dp->use_rate_select) {
1592                 *link_bw = 0;
1593                 *rate_select =
1594                         intel_dp_rate_select(intel_dp, port_clock);
1595         } else {
1596                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1597                 *rate_select = 0;
1598         }
1599 }
1600
1601 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1602                                 struct intel_crtc_state *pipe_config)
1603 {
1604         int bpp, bpc;
1605
1606         bpp = pipe_config->pipe_bpp;
1607         bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
1608
1609         if (bpc > 0)
1610                 bpp = min(bpp, 3*bpc);
1611
1612         /* For DP Compliance we override the computed bpp for the pipe */
1613         if (intel_dp->compliance.test_data.bpc != 0) {
1614                 pipe_config->pipe_bpp = 3*intel_dp->compliance.test_data.bpc;
1615                 pipe_config->dither_force_disable = pipe_config->pipe_bpp == 6*3;
1616                 DRM_DEBUG_KMS("Setting pipe_bpp to %d\n",
1617                               pipe_config->pipe_bpp);
1618         }
1619         return bpp;
1620 }
1621
1622 bool
1623 intel_dp_compute_config(struct intel_encoder *encoder,
1624                         struct intel_crtc_state *pipe_config,
1625                         struct drm_connector_state *conn_state)
1626 {
1627         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1628         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1629         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1630         enum port port = dp_to_dig_port(intel_dp)->port;
1631         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1632         struct intel_connector *intel_connector = intel_dp->attached_connector;
1633         int lane_count, clock;
1634         int min_lane_count = 1;
1635         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1636         /* Conveniently, the link BW constants become indices with a shift...*/
1637         int min_clock = 0;
1638         int max_clock;
1639         int bpp, mode_rate;
1640         int link_avail, link_clock;
1641         int common_len;
1642         uint8_t link_bw, rate_select;
1643
1644         common_len = intel_dp_common_len_rate_limit(intel_dp,
1645                                                     intel_dp->max_link_rate);
1646
1647         /* No common link rates between source and sink */
1648         WARN_ON(common_len <= 0);
1649
1650         max_clock = common_len - 1;
1651
1652         if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
1653                 pipe_config->has_pch_encoder = true;
1654
1655         pipe_config->has_drrs = false;
1656         pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1657
1658         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1659                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1660                                        adjusted_mode);
1661
1662                 if (INTEL_GEN(dev_priv) >= 9) {
1663                         int ret;
1664                         ret = skl_update_scaler_crtc(pipe_config);
1665                         if (ret)
1666                                 return ret;
1667                 }
1668
1669                 if (HAS_GMCH_DISPLAY(dev_priv))
1670                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1671                                                  intel_connector->panel.fitting_mode);
1672                 else
1673                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1674                                                 intel_connector->panel.fitting_mode);
1675         }
1676
1677         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1678                 return false;
1679
1680         /* Use values requested by Compliance Test Request */
1681         if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1682                 int index;
1683
1684                 index = intel_dp_rate_index(intel_dp->common_rates,
1685                                             intel_dp->num_common_rates,
1686                                             intel_dp->compliance.test_link_rate);
1687                 if (index >= 0)
1688                         min_clock = max_clock = index;
1689                 min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count;
1690         }
1691         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1692                       "max bw %d pixel clock %iKHz\n",
1693                       max_lane_count, intel_dp->common_rates[max_clock],
1694                       adjusted_mode->crtc_clock);
1695
1696         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1697          * bpc in between. */
1698         bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
1699         if (is_edp(intel_dp)) {
1700
1701                 /* Get bpp from vbt only for panels that dont have bpp in edid */
1702                 if (intel_connector->base.display_info.bpc == 0 &&
1703                         (dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
1704                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1705                                       dev_priv->vbt.edp.bpp);
1706                         bpp = dev_priv->vbt.edp.bpp;
1707                 }
1708
1709                 /*
1710                  * Use the maximum clock and number of lanes the eDP panel
1711                  * advertizes being capable of. The panels are generally
1712                  * designed to support only a single clock and lane
1713                  * configuration, and typically these values correspond to the
1714                  * native resolution of the panel.
1715                  */
1716                 min_lane_count = max_lane_count;
1717                 min_clock = max_clock;
1718         }
1719
1720         for (; bpp >= 6*3; bpp -= 2*3) {
1721                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1722                                                    bpp);
1723
1724                 for (clock = min_clock; clock <= max_clock; clock++) {
1725                         for (lane_count = min_lane_count;
1726                                 lane_count <= max_lane_count;
1727                                 lane_count <<= 1) {
1728
1729                                 link_clock = intel_dp->common_rates[clock];
1730                                 link_avail = intel_dp_max_data_rate(link_clock,
1731                                                                     lane_count);
1732
1733                                 if (mode_rate <= link_avail) {
1734                                         goto found;
1735                                 }
1736                         }
1737                 }
1738         }
1739
1740         return false;
1741
1742 found:
1743         if (intel_dp->color_range_auto) {
1744                 /*
1745                  * See:
1746                  * CEA-861-E - 5.1 Default Encoding Parameters
1747                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1748                  */
1749                 pipe_config->limited_color_range =
1750                         bpp != 18 &&
1751                         drm_default_rgb_quant_range(adjusted_mode) ==
1752                         HDMI_QUANTIZATION_RANGE_LIMITED;
1753         } else {
1754                 pipe_config->limited_color_range =
1755                         intel_dp->limited_color_range;
1756         }
1757
1758         pipe_config->lane_count = lane_count;
1759
1760         pipe_config->pipe_bpp = bpp;
1761         pipe_config->port_clock = intel_dp->common_rates[clock];
1762
1763         intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1764                               &link_bw, &rate_select);
1765
1766         DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1767                       link_bw, rate_select, pipe_config->lane_count,
1768                       pipe_config->port_clock, bpp);
1769         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1770                       mode_rate, link_avail);
1771
1772         intel_link_compute_m_n(bpp, lane_count,
1773                                adjusted_mode->crtc_clock,
1774                                pipe_config->port_clock,
1775                                &pipe_config->dp_m_n);
1776
1777         if (intel_connector->panel.downclock_mode != NULL &&
1778                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1779                         pipe_config->has_drrs = true;
1780                         intel_link_compute_m_n(bpp, lane_count,
1781                                 intel_connector->panel.downclock_mode->clock,
1782                                 pipe_config->port_clock,
1783                                 &pipe_config->dp_m2_n2);
1784         }
1785
1786         /*
1787          * DPLL0 VCO may need to be adjusted to get the correct
1788          * clock for eDP. This will affect cdclk as well.
1789          */
1790         if (is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
1791                 int vco;
1792
1793                 switch (pipe_config->port_clock / 2) {
1794                 case 108000:
1795                 case 216000:
1796                         vco = 8640000;
1797                         break;
1798                 default:
1799                         vco = 8100000;
1800                         break;
1801                 }
1802
1803                 to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco;
1804         }
1805
1806         if (!HAS_DDI(dev_priv))
1807                 intel_dp_set_clock(encoder, pipe_config);
1808
1809         return true;
1810 }
1811
/*
 * Cache the negotiated link parameters (rate, lane count, MST mode) on the
 * intel_dp for later use, e.g. during link training and modeset.
 */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
                              int link_rate, uint8_t lane_count,
                              bool link_mst)
{
        intel_dp->link_rate = link_rate;
        intel_dp->lane_count = lane_count;
        intel_dp->link_mst = link_mst;
}
1820
/*
 * Build up the DP port register value (intel_dp->DP) for the given pipe
 * config, handling the per-platform register layout differences.  This
 * does not enable the port; it prepares the value and, on CPT, programs
 * TRANS_DP_CTL.  Also caches the link params via intel_dp_set_link_params().
 */
static void intel_dp_prepare(struct intel_encoder *encoder,
                             struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

        intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
                                 pipe_config->lane_count,
                                 intel_crtc_has_type(pipe_config,
                                                     INTEL_OUTPUT_DP_MST));

        /*
         * There are four kinds of DP registers:
         *
         *      IBX PCH
         *      SNB CPU
         *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
         * except that the CPU DP PLL is configured in this
         * register
         *
         * CPT PCH is quite different, having many bits moved
         * to the TRANS_DP_CTL register instead. That
         * configuration happens (oddly) in ironlake_pch_enable
         */

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

        /* Split out the IBX/CPU vs CPT settings */

        if (IS_GEN7(dev_priv) && port == PORT_A) {
                /* gen7 CPU eDP (port A): sync polarity and pipe select here. */
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                intel_dp->DP |= crtc->pipe << 29;
        } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
                u32 trans_dp;

                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                /* On CPT, enhanced framing lives in TRANS_DP_CTL instead. */
                trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        trans_dp |= TRANS_DP_ENH_FRAMING;
                else
                        trans_dp &= ~TRANS_DP_ENH_FRAMING;
                I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
        } else {
                /* IBX PCH / CPU DP layout. */
                if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
                        intel_dp->DP |= DP_COLOR_RANGE_16_235;

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                if (IS_CHERRYVIEW(dev_priv))
                        intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
                else if (crtc->pipe == PIPE_B)
                        intel_dp->DP |= DP_PIPEB_SELECT;
        }
}
1905
1906 #define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1907 #define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1908
1909 #define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1910 #define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
1911
1912 #define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1913 #define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1914
1915 static void intel_pps_verify_state(struct drm_i915_private *dev_priv,
1916                                    struct intel_dp *intel_dp);
1917
/*
 * Poll the panel power sequencer status register until
 * (status & @mask) == @value, or the 5 second timeout expires (logged as
 * an error).  Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
                                       u32 mask,
                                       u32 value)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* Sanity-check software PPS state against the hardware first. */
        intel_pps_verify_state(dev_priv, intel_dp);

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
                        mask, value,
                        I915_READ(pp_stat_reg),
                        I915_READ(pp_ctrl_reg));

        /* Panel power sequencing can legitimately take a while; allow 5s. */
        if (intel_wait_for_register(dev_priv,
                                    pp_stat_reg, mask, value,
                                    5000))
                DRM_ERROR("Panel status timeout: status %08x control %08x\n",
                                I915_READ(pp_stat_reg),
                                I915_READ(pp_ctrl_reg));

        DRM_DEBUG_KMS("Wait complete\n");
}
1947
/* Wait for the panel power sequencer to reach the "on and idle" state. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power on\n");
        wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1953
/* Wait for the panel power sequencer to report the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power off time\n");
        wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1959
/*
 * Enforce the panel power cycle delay: after the panel has been powered
 * off, it must stay off for panel_power_cycle_delay ms before it may be
 * powered up again.  Waits out whatever portion of that delay remains.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        ktime_t panel_power_on_time;
        s64 panel_power_off_duration;

        DRM_DEBUG_KMS("Wait for panel power cycle\n");

        /* take the difference of current time and panel power off time
         * and then make panel wait for t11_t12 if needed. */
        panel_power_on_time = ktime_get_boottime();
        panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

        /* When we disable the VDD override bit last we have to do the manual
         * wait. */
        if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
                wait_remaining_ms_from_jiffies(jiffies,
                                       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

        wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1980
/* Wait the configured delay between panel power-on and backlight enable. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
                                       intel_dp->backlight_on_delay);
}
1986
/* Wait the configured delay after backlight disable before further panel
 * power sequencing. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
                                       intel_dp->backlight_off_delay);
}
1992
1993 /* Read the current pp_control value, unlocking the register if it
1994  * is locked
1995  */
1996
1997 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1998 {
1999         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2000         struct drm_i915_private *dev_priv = to_i915(dev);
2001         u32 control;
2002
2003         lockdep_assert_held(&dev_priv->pps_mutex);
2004
2005         control = I915_READ(_pp_ctrl_reg(intel_dp));
2006         if (WARN_ON(!HAS_DDI(dev_priv) &&
2007                     (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2008                 control &= ~PANEL_UNLOCK_MASK;
2009                 control |= PANEL_UNLOCK_REGS;
2010         }
2011         return control;
2012 }
2013
2014 /*
2015  * Must be paired with edp_panel_vdd_off().
2016  * Must hold pps_mutex around the whole on/off sequence.
2017  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2018  */
2019 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2020 {
2021         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2022         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2023         struct drm_i915_private *dev_priv = to_i915(dev);
2024         u32 pp;
2025         i915_reg_t pp_stat_reg, pp_ctrl_reg;
2026         bool need_to_disable = !intel_dp->want_panel_vdd;
2027
2028         lockdep_assert_held(&dev_priv->pps_mutex);
2029
2030         if (!is_edp(intel_dp))
2031                 return false;
2032
2033         cancel_delayed_work(&intel_dp->panel_vdd_work);
2034         intel_dp->want_panel_vdd = true;
2035
2036         if (edp_have_panel_vdd(intel_dp))
2037                 return need_to_disable;
2038
2039         intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
2040
2041         DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2042                       port_name(intel_dig_port->port));
2043
2044         if (!edp_have_panel_power(intel_dp))
2045                 wait_panel_power_cycle(intel_dp);
2046
2047         pp = ironlake_get_pp_control(intel_dp);
2048         pp |= EDP_FORCE_VDD;
2049
2050         pp_stat_reg = _pp_stat_reg(intel_dp);
2051         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2052
2053         I915_WRITE(pp_ctrl_reg, pp);
2054         POSTING_READ(pp_ctrl_reg);
2055         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2056                         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2057         /*
2058          * If the panel wasn't on, delay before accessing aux channel
2059          */
2060         if (!edp_have_panel_power(intel_dp)) {
2061                 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2062                               port_name(intel_dig_port->port));
2063                 msleep(intel_dp->panel_power_up_delay);
2064         }
2065
2066         return need_to_disable;
2067 }
2068
2069 /*
2070  * Must be paired with intel_edp_panel_vdd_off() or
2071  * intel_edp_panel_off().
2072  * Nested calls to these functions are not allowed since
2073  * we drop the lock. Caller must use some higher level
2074  * locking to prevent nested calls from other threads.
2075  */
2076 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2077 {
2078         bool vdd;
2079
2080         if (!is_edp(intel_dp))
2081                 return;
2082
2083         pps_lock(intel_dp);
2084         vdd = edp_panel_vdd_on(intel_dp);
2085         pps_unlock(intel_dp);
2086
2087         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2088              port_name(dp_to_dig_port(intel_dp)->port));
2089 }
2090
/*
 * Immediately turn off the VDD force bit in hardware and drop the aux
 * power reference taken in edp_panel_vdd_on().  Caller must hold
 * pps_mutex and want_panel_vdd must already be false.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_digital_port *intel_dig_port =
                dp_to_dig_port(intel_dp);
        u32 pp;
        i915_reg_t pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        WARN_ON(intel_dp->want_panel_vdd);

        /* Nothing to do if VDD is already off in hardware. */
        if (!edp_have_panel_vdd(intel_dp))
                return;

        DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
                      port_name(intel_dig_port->port));

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_FORCE_VDD;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp_stat_reg = _pp_stat_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Make sure sequencer is idle before allowing subsequent activity */
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

        /* If the panel is off, record the timestamp for the power cycle
         * delay accounting in wait_panel_power_cycle(). */
        if ((pp & PANEL_POWER_ON) == 0)
                intel_dp->panel_power_off_time = ktime_get_boottime();

        intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
2128
/*
 * Delayed-work handler that turns VDD off, unless someone requested it
 * back on in the meantime (want_panel_vdd set again).
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
        struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
                                                 struct intel_dp, panel_vdd_work);

        pps_lock(intel_dp);
        if (!intel_dp->want_panel_vdd)
                edp_panel_vdd_off_sync(intel_dp);
        pps_unlock(intel_dp);
}
2139
/* Schedule a deferred VDD off via edp_panel_vdd_work(). */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
        unsigned long delay;

        /*
         * Queue the timer to fire a long time from now (relative to the power
         * down delay) to keep the panel power up across a sequence of
         * operations.
         */
        delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
        schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
2152
2153 /*
2154  * Must be paired with edp_panel_vdd_on().
2155  * Must hold pps_mutex around the whole on/off sequence.
2156  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2157  */
2158 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2159 {
2160         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
2161
2162         lockdep_assert_held(&dev_priv->pps_mutex);
2163
2164         if (!is_edp(intel_dp))
2165                 return;
2166
2167         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2168              port_name(dp_to_dig_port(intel_dp)->port));
2169
2170         intel_dp->want_panel_vdd = false;
2171
2172         if (sync)
2173                 edp_panel_vdd_off_sync(intel_dp);
2174         else
2175                 edp_panel_vdd_schedule_off(intel_dp);
2176 }
2177
/*
 * Turn on eDP panel power via the power sequencer and wait for the panel
 * to report on.  Caller must hold pps_mutex; no-op with a warning if
 * panel power is already on.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        if (WARN(edp_have_panel_power(intel_dp),
                 "eDP port %c panel power already on\n",
                 port_name(dp_to_dig_port(intel_dp)->port)))
                return;

        /* Honour the mandated off-time before powering back up. */
        wait_panel_power_cycle(intel_dp);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        if (IS_GEN5(dev_priv)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }

        pp |= PANEL_POWER_ON;
        if (!IS_GEN5(dev_priv))
                pp |= PANEL_POWER_RESET;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        wait_panel_on(intel_dp);
        /* Record power-on time for the backlight-on delay. */
        intel_dp->last_power_on = jiffies;

        if (IS_GEN5(dev_priv)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }
}
2225
/* Public wrapper: turn eDP panel power on under the PPS lock. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_on(intel_dp);
        pps_unlock(intel_dp);
}
2235
2236
/*
 * Turn off eDP panel power (and the VDD force bit with it), wait for the
 * panel to report off, and release the aux power reference taken when VDD
 * was enabled.  Caller must hold pps_mutex and must have VDD forced on.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
             port_name(dp_to_dig_port(intel_dp)->port));

        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        intel_dp->want_panel_vdd = false;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Record off time for the power cycle delay accounting. */
        intel_dp->panel_power_off_time = ktime_get_boottime();
        wait_panel_off(intel_dp);

        /* We got a reference when we enabled the VDD. */
        intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
2274
/* Public wrapper: turn eDP panel power off under the PPS lock. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_off(intel_dp);
        pps_unlock(intel_dp);
}
2284
/* Enable backlight in the panel power control (PP_CONTROL BLC bit),
 * after waiting out the panel power-on to backlight-on delay. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        /*
         * If we enable the backlight right away following a panel power
         * on, we may see slight flicker as the panel syncs with the eDP
         * link.  So delay a bit to make sure the image is solid before
         * allowing it to appear.
         */
        wait_backlight_on(intel_dp);

        pps_lock(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);
}
2314
/* Enable backlight PWM and backlight PP control.
 * PWM first, then the panel power control BLC enable bit. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        intel_panel_enable_backlight(intel_dp->attached_connector);
        _intel_edp_backlight_on(intel_dp);
}
2326
/* Disable backlight in the panel power control (PP_CONTROL BLC bit),
 * then wait out the backlight-off delay. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);

        /* Record the off time so later power sequencing can honour the
         * backlight-off delay. */
        intel_dp->last_backlight_off = jiffies;
        edp_wait_backlight_off(intel_dp);
}
2353
/* Disable backlight PP control and backlight PWM.
 * Reverse order of intel_edp_backlight_on(): PP control first, then PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        _intel_edp_backlight_off(intel_dp);
        intel_panel_disable_backlight(intel_dp->attached_connector);
}
2365
2366 /*
2367  * Hook for controlling the panel power control backlight through the bl_power
2368  * sysfs attribute. Take care to handle multiple calls.
2369  */
2370 static void intel_edp_backlight_power(struct intel_connector *connector,
2371                                       bool enable)
2372 {
2373         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2374         bool is_enabled;
2375
2376         pps_lock(intel_dp);
2377         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2378         pps_unlock(intel_dp);
2379
2380         if (is_enabled == enable)
2381                 return;
2382
2383         DRM_DEBUG_KMS("panel power control backlight %s\n",
2384                       enable ? "enable" : "disable");
2385
2386         if (enable)
2387                 _intel_edp_backlight_on(intel_dp);
2388         else
2389                 _intel_edp_backlight_off(intel_dp);
2390 }
2391
/* Warn if the DP port enable bit does not match the expected @state. */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

        I915_STATE_WARN(cur_state != state,
                        "DP port %c state assertion failure (expected %s, current %s)\n",
                        port_name(dig_port->port),
                        onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2404
/* Warn if the eDP PLL enable bit in DP_A does not match the expected @state. */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
        bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

        I915_STATE_WARN(cur_state != state,
                        "eDP PLL state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2415
/*
 * Enable the CPU eDP PLL at the frequency matching the port clock.
 * Pipe and port must be disabled and the PLL off when this is called
 * (enforced by the asserts below).
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
                                struct intel_crtc_state *pipe_config)
{
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        assert_pipe_disabled(dev_priv, crtc->pipe);
        assert_dp_port_disabled(intel_dp);
        assert_edp_pll_disabled(dev_priv);

        DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
                      pipe_config->port_clock);

        /* Select the PLL frequency before enabling the PLL itself. */
        intel_dp->DP &= ~DP_PLL_FREQ_MASK;

        if (pipe_config->port_clock == 162000)
                intel_dp->DP |= DP_PLL_FREQ_162MHZ;
        else
                intel_dp->DP |= DP_PLL_FREQ_270MHZ;

        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        udelay(500);

        /*
         * [DevILK] Work around required when enabling DP PLL
         * while a pipe is enabled going to FDI:
         * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
         * 2. Program DP PLL enable
         */
        if (IS_GEN5(dev_priv))
                intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

        intel_dp->DP |= DP_PLL_ENABLE;

        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        udelay(200);
}
2455
/*
 * Disable the dedicated eDP PLL behind the DP_A register.
 *
 * Must be called with the pipe and DP port already disabled but the
 * PLL still enabled; the asserts below enforce that ordering.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        assert_pipe_disabled(dev_priv, crtc->pipe);
        assert_dp_port_disabled(intel_dp);
        assert_edp_pll_enabled(dev_priv);

        DRM_DEBUG_KMS("disabling eDP PLL\n");

        intel_dp->DP &= ~DP_PLL_ENABLE;

        /* Posting read + delay to let the PLL actually spin down. */
        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        udelay(200);
}
2474
/*
 * If the sink supports it, try to set the power state appropriately.
 *
 * @mode: DRM_MODE_DPMS_* value; DRM_MODE_DPMS_ON wakes the sink (D0),
 * anything else puts it into D3.
 *
 * Sinks with DPCD revision below 1.1 lack the DP_SET_POWER register,
 * so this silently does nothing for them.
 */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
        int ret, i;

        /* Should have a valid DPCD by this point */
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;

        if (mode != DRM_MODE_DPMS_ON) {
                ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                         DP_SET_POWER_D3);
        } else {
                struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

                /*
                 * When turning on, we need to retry for 1ms to give the sink
                 * time to wake up.
                 */
                for (i = 0; i < 3; i++) {
                        /* drm_dp_dpcd_writeb() returns 1 on success (bytes written) */
                        ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                                 DP_SET_POWER_D0);
                        if (ret == 1)
                                break;
                        msleep(1);
                }

                /* An active LSPCON additionally needs to settle into PCON mode. */
                if (ret == 1 && lspcon->active)
                        lspcon_wait_pcon_mode(lspcon);
        }

        if (ret != 1)
                DRM_DEBUG_KMS("failed to %s sink power state\n",
                              mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
2510
/*
 * Read out whether the DP port is enabled in hardware, and if so which
 * pipe drives it (stored in *pipe). Returns true iff the port is enabled
 * and the pipe could be determined. Temporarily grabs the encoder's
 * power domain for the register reads and releases it before returning.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                  enum pipe *pipe)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 tmp;
        bool ret;

        if (!intel_display_power_get_if_enabled(dev_priv,
                                                encoder->power_domain))
                return false;

        ret = false;

        tmp = I915_READ(intel_dp->output_reg);

        if (!(tmp & DP_PORT_EN))
                goto out;

        if (IS_GEN7(dev_priv) && port == PORT_A) {
                *pipe = PORT_TO_PIPE_CPT(tmp);
        } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
                enum pipe p;

                /*
                 * On CPT the pipe<->port routing lives in the transcoder's
                 * TRANS_DP_CTL register, so scan every pipe for a match.
                 */
                for_each_pipe(dev_priv, p) {
                        u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
                        if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
                                *pipe = p;
                                ret = true;

                                goto out;
                        }
                }

                /* Port enabled but no transcoder claims it - report failure. */
                DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
                              i915_mmio_reg_offset(intel_dp->output_reg));
        } else if (IS_CHERRYVIEW(dev_priv)) {
                *pipe = DP_PORT_TO_PIPE_CHV(tmp);
        } else {
                *pipe = PORT_TO_PIPE(tmp);
        }

        ret = true;

out:
        intel_display_power_put(dev_priv, encoder->power_domain);

        return ret;
}
2562
/*
 * Read the current hardware state of the DP encoder back into
 * @pipe_config: audio enable, sync polarities, limited color range,
 * lane count, port clock (for port A) and the derived dotclock.
 * Also applies the VBT eDP bpp override quirk at the end.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        tmp = I915_READ(intel_dp->output_reg);

        pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

        /*
         * On CPT the sync polarities live in the transcoder's TRANS_DP_CTL
         * register; everywhere else they are in the port register itself.
         */
        if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
                u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

                if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->base.adjusted_mode.flags |= flags;

        if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;

        pipe_config->lane_count =
                ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

        intel_dp_get_m_n(crtc, pipe_config);

        /* Port A's clock comes from the dedicated eDP PLL (DP_A). */
        if (port == PORT_A) {
                if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        pipe_config->base.adjusted_mode.crtc_clock =
                intel_dotclock_calculate(pipe_config->port_clock,
                                         &pipe_config->dp_m_n);

        if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                              pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
                dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
        }
}
2642
/*
 * Disable the DP encoder: audio first, then PSR (on non-DDI platforms),
 * then the (e)DP panel with vdd held across the transition. On gen < 5
 * the port itself must also go down here, before the pipe is disabled.
 */
static void intel_disable_dp(struct intel_encoder *encoder,
                             struct intel_crtc_state *old_crtc_state,
                             struct drm_connector_state *old_conn_state)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

        if (old_crtc_state->has_audio)
                intel_audio_codec_disable(encoder);

        if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv))
                intel_psr_disable(intel_dp);

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        intel_edp_panel_off(intel_dp);

        /* disable the port before the pipe on g4x */
        if (INTEL_GEN(dev_priv) < 5)
                intel_dp_link_down(intel_dp);
}
2667
2668 static void ilk_post_disable_dp(struct intel_encoder *encoder,
2669                                 struct intel_crtc_state *old_crtc_state,
2670                                 struct drm_connector_state *old_conn_state)
2671 {
2672         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2673         enum port port = dp_to_dig_port(intel_dp)->port;
2674
2675         intel_dp_link_down(intel_dp);
2676
2677         /* Only ilk+ has port A */
2678         if (port == PORT_A)
2679                 ironlake_edp_pll_off(intel_dp);
2680 }
2681
2682 static void vlv_post_disable_dp(struct intel_encoder *encoder,
2683                                 struct intel_crtc_state *old_crtc_state,
2684                                 struct drm_connector_state *old_conn_state)
2685 {
2686         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2687
2688         intel_dp_link_down(intel_dp);
2689 }
2690
2691 static void chv_post_disable_dp(struct intel_encoder *encoder,
2692                                 struct intel_crtc_state *old_crtc_state,
2693                                 struct drm_connector_state *old_conn_state)
2694 {
2695         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2696         struct drm_device *dev = encoder->base.dev;
2697         struct drm_i915_private *dev_priv = to_i915(dev);
2698
2699         intel_dp_link_down(intel_dp);
2700
2701         mutex_lock(&dev_priv->sb_lock);
2702
2703         /* Assert data lane reset */
2704         chv_data_lane_soft_reset(encoder, true);
2705
2706         mutex_unlock(&dev_priv->sb_lock);
2707 }
2708
/*
 * Program the requested link training pattern (and scrambling state)
 * into the source. Three register layouts exist:
 *  - DDI platforms program DP_TP_CTL directly;
 *  - gen7 port A and CPT PCH ports use the _CPT bits in *DP;
 *  - everything else (g4x/vlv/chv) uses the legacy bits in *DP.
 * For the latter two only the cached *DP value is updated; the caller
 * is responsible for writing it to the port register.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
                         uint32_t *DP,
                         uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;

        if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
                DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
                              dp_train_pat & DP_TRAINING_PATTERN_MASK);

        if (HAS_DDI(dev_priv)) {
                uint32_t temp = I915_READ(DP_TP_CTL(port));

                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                else
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

                        break;
                case DP_TRAINING_PATTERN_1:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);

        } else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
                   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* No TPS3 on these; fall back to TPS2. */
                        DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }

        } else {
                if (IS_CHERRYVIEW(dev_priv))
                        *DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        *DP &= ~DP_LINK_TRAIN_MASK;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* Only CHV supports TPS3 in this register layout. */
                        if (IS_CHERRYVIEW(dev_priv)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
                                DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
                }
        }
}
2796
/*
 * Enable the DP port with training pattern 1, as the spec requires.
 *
 * NOTE(review): despite the parameter name, the caller (intel_enable_dp)
 * passes the *new* crtc state here; only has_audio is read from it.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp,
                                 struct intel_crtc_state *old_crtc_state)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);

        /* enable with pattern 1 (as per spec) */

        intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;
        if (old_crtc_state->has_audio)
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
}
2820
/*
 * Common DP enable path: set up the power sequencer (VLV/CHV), enable
 * the port, power up the eDP panel under pps_lock, wait for the PHY
 * lanes (VLV/CHV), wake the sink, run link training, and finally
 * enable audio if the new state requests it.
 */
static void intel_enable_dp(struct intel_encoder *encoder,
                            struct intel_crtc_state *pipe_config,
                            struct drm_connector_state *conn_state)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);
        enum pipe pipe = crtc->pipe;

        /* The port must still be disabled at this point. */
        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                vlv_init_panel_power_sequencer(intel_dp);

        intel_dp_enable_port(intel_dp, pipe_config);

        /* Power up the panel with vdd held, then drop vdd again. */
        edp_panel_vdd_on(intel_dp);
        edp_panel_on(intel_dp);
        edp_panel_vdd_off(intel_dp, true);

        pps_unlock(intel_dp);

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                unsigned int lane_mask = 0x0;

                /* CHV needs to know which lanes are unused to park them. */
                if (IS_CHERRYVIEW(dev_priv))
                        lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

                vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
                                    lane_mask);
        }

        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);

        if (pipe_config->has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(pipe));
                intel_audio_codec_enable(encoder, pipe_config, conn_state);
        }
}
2868
2869 static void g4x_enable_dp(struct intel_encoder *encoder,
2870                           struct intel_crtc_state *pipe_config,
2871                           struct drm_connector_state *conn_state)
2872 {
2873         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2874
2875         intel_enable_dp(encoder, pipe_config, conn_state);
2876         intel_edp_backlight_on(intel_dp);
2877 }
2878
2879 static void vlv_enable_dp(struct intel_encoder *encoder,
2880                           struct intel_crtc_state *pipe_config,
2881                           struct drm_connector_state *conn_state)
2882 {
2883         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2884
2885         intel_edp_backlight_on(intel_dp);
2886         intel_psr_enable(intel_dp);
2887 }
2888
2889 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
2890                               struct intel_crtc_state *pipe_config,
2891                               struct drm_connector_state *conn_state)
2892 {
2893         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2894         enum port port = dp_to_dig_port(intel_dp)->port;
2895
2896         intel_dp_prepare(encoder, pipe_config);
2897
2898         /* Only ilk+ has port A */
2899         if (port == PORT_A)
2900                 ironlake_edp_pll_on(intel_dp, pipe_config);
2901 }
2902
/*
 * Logically disconnect this port from its current power sequencer:
 * sync off any pending vdd, clear the port select in the PPS registers
 * and mark pps_pipe invalid. Caller holds pps_mutex (via callers that
 * assert it); the port must not be active.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
        enum pipe pipe = intel_dp->pps_pipe;
        i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

        WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

        /* Only pipes A and B have power sequencers on VLV/CHV. */
        if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
                return;

        edp_panel_vdd_off_sync(intel_dp);

        /*
         * VLV seems to get confused when multiple power seqeuencers
         * have the same port selected (even if only one has power/vdd
         * enabled). The failure manifests as vlv_wait_port_ready() failing
         * CHV on the other hand doesn't seem to mind having the same port
         * selected in multiple power seqeuencers, but let's clear the
         * port select always when logically disconnecting a power sequencer
         * from a port.
         */
        DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));
        I915_WRITE(pp_on_reg, 0);
        POSTING_READ(pp_on_reg);

        intel_dp->pps_pipe = INVALID_PIPE;
}
2933
/*
 * Detach @pipe's power sequencer from whichever (e)DP port currently
 * uses it, so the caller can claim it. Must be called with pps_mutex
 * held; warns if the sequencer is stolen from a still-active port.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;

        lockdep_assert_held(&dev_priv->pps_mutex);

        for_each_intel_encoder(dev, encoder) {
                struct intel_dp *intel_dp;
                enum port port;

                /* Only DP/eDP encoders own power sequencers. */
                if (encoder->type != INTEL_OUTPUT_DP &&
                    encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                port = dp_to_dig_port(intel_dp)->port;

                WARN(intel_dp->active_pipe == pipe,
                     "stealing pipe %c power sequencer from active (e)DP port %c\n",
                     pipe_name(pipe), port_name(port));

                if (intel_dp->pps_pipe != pipe)
                        continue;

                DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
                              pipe_name(pipe), port_name(port));

                /* make sure vdd is off before we steal it */
                vlv_detach_power_sequencer(intel_dp);
        }
}
2967
/*
 * Bind the power sequencer of the crtc's pipe to this port (VLV/CHV):
 * release any previously-held sequencer, steal the target pipe's one
 * from other ports, then (for eDP only) claim it and program its
 * registers. Must be called with pps_mutex held.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        lockdep_assert_held(&dev_priv->pps_mutex);

        WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

        if (intel_dp->pps_pipe != INVALID_PIPE &&
            intel_dp->pps_pipe != crtc->pipe) {
                /*
                 * If another power sequencer was being used on this
                 * port previously make sure to turn off vdd there while
                 * we still have control of it.
                 */
                vlv_detach_power_sequencer(intel_dp);
        }

        /*
         * We may be stealing the power
         * sequencer from another port.
         */
        vlv_steal_power_sequencer(dev, crtc->pipe);

        intel_dp->active_pipe = crtc->pipe;

        /* Non-eDP ports don't need a dedicated power sequencer. */
        if (!is_edp(intel_dp))
                return;

        /* now it's all ours */
        intel_dp->pps_pipe = crtc->pipe;

        DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
}
3011
/* VLV pre-enable hook: bring up the PHY, then run the common DP enable. */
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
                              struct intel_crtc_state *pipe_config,
                              struct drm_connector_state *conn_state)
{
        vlv_phy_pre_encoder_enable(encoder);

        intel_enable_dp(encoder, pipe_config, conn_state);
}
3020
/* VLV pre-PLL hook: program the port registers before the PHY PLL setup. */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
                                  struct intel_crtc_state *pipe_config,
                                  struct drm_connector_state *conn_state)
{
        intel_dp_prepare(encoder, pipe_config);

        vlv_phy_pre_pll_enable(encoder);
}
3029
/*
 * CHV pre-enable hook: bring up the PHY, run the common DP enable,
 * then drop the common-lane override once the lanes are self-sustaining.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder,
                              struct intel_crtc_state *pipe_config,
                              struct drm_connector_state *conn_state)
{
        chv_phy_pre_encoder_enable(encoder);

        intel_enable_dp(encoder, pipe_config, conn_state);

        /* Second common lane will stay alive on its own now */
        chv_phy_release_cl2_override(encoder);
}
3041
/* CHV pre-PLL hook: program the port registers before the PHY PLL setup. */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
                                  struct intel_crtc_state *pipe_config,
                                  struct drm_connector_state *conn_state)
{
        intel_dp_prepare(encoder, pipe_config);

        chv_phy_pre_pll_enable(encoder);
}
3050
/* CHV post-PLL-disable hook: let the PHY power down its common lanes. */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
                                    struct intel_crtc_state *pipe_config,
                                    struct drm_connector_state *conn_state)
{
        chv_phy_post_pll_disable(encoder);
}
3057
/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information.
 *
 * Returns true iff all DP_LINK_STATUS_SIZE bytes were read successfully.
 */
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
        return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
                                DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
3068
3069 static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
3070 {
3071         uint8_t psr_caps = 0;
3072
3073         drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps);
3074         return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
3075 }
3076
3077 static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
3078 {
3079         uint8_t dprx = 0;
3080
3081         drm_dp_dpcd_readb(&intel_dp->aux,
3082                         DP_DPRX_FEATURE_ENUMERATION_LIST,
3083                         &dprx);
3084         return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
3085 }
3086
3087 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
3088 {
3089         uint8_t alpm_caps = 0;
3090
3091         drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &alpm_caps);
3092         return alpm_caps & DP_ALPM_CAP;
3093 }
3094
/* These are source-specific values. */
/*
 * Return the maximum voltage swing level the source hardware supports
 * for this port. The platform checks are ordered most-specific first
 * (GEN9_LP before the generic gen >= 9 check).
 */
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        enum port port = dp_to_dig_port(intel_dp)->port;

        if (IS_GEN9_LP(dev_priv))
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
        else if (INTEL_GEN(dev_priv) >= 9) {
                /* gen9+ DDI platforms delegate to the DDI buffer translations. */
                struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
                return intel_ddi_dp_voltage_max(encoder);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
        else if (IS_GEN7(dev_priv) && port == PORT_A)
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
        else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
        else
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
3116
/*
 * Return the maximum pre-emphasis level the source hardware supports
 * at the given @voltage_swing level. Per-platform tables; the general
 * pattern is that higher swing levels allow less pre-emphasis.
 */
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        enum port port = dp_to_dig_port(intel_dp)->port;

        if (INTEL_GEN(dev_priv) >= 9) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_GEN7(dev_priv) && port == PORT_A) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        }
}
3184
/*
 * vlv_signal_levels - program VLV (Valleyview) DPIO PHY signal levels
 *
 * Translates the voltage swing and pre-emphasis levels requested in
 * intel_dp->train_set[0] into the VLV PHY demph/preemph/uniqtranscale
 * register values and programs them via vlv_set_phy_signal_level().
 *
 * Returns 0 in all cases: either the PHY was programmed, or the
 * swing/pre-emphasis combination is unsupported and the PHY is left
 * untouched.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];

	/*
	 * Outer switch selects the pre-emphasis level; the inner switch
	 * selects the voltage swing. Higher pre-emphasis levels support
	 * fewer swing levels (per the DP spec's allowed combinations).
	 */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);

	return 0;
}
3270
/*
 * chv_signal_levels - program CHV (Cherryview) PHY signal levels
 *
 * Translates the voltage swing and pre-emphasis levels requested in
 * intel_dp->train_set[0] into CHV PHY de-emphasis and margin register
 * values and programs them via chv_set_phy_signal_level().
 *
 * Returns 0 in all cases: either the PHY was programmed, or the
 * swing/pre-emphasis combination is unsupported and the PHY is left
 * untouched.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	uint8_t train_set = intel_dp->train_set[0];

	/* Outer switch: pre-emphasis level; inner switch: voltage swing. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* Only the max-swing/no-preemph entry sets this. */
			uniq_trans_scale = true;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);

	return 0;
}
3353
3354 static uint32_t
3355 gen4_signal_levels(uint8_t train_set)
3356 {
3357         uint32_t        signal_levels = 0;
3358
3359         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3360         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3361         default:
3362                 signal_levels |= DP_VOLTAGE_0_4;
3363                 break;
3364         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3365                 signal_levels |= DP_VOLTAGE_0_6;
3366                 break;
3367         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3368                 signal_levels |= DP_VOLTAGE_0_8;
3369                 break;
3370         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3371                 signal_levels |= DP_VOLTAGE_1_2;
3372                 break;
3373         }
3374         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3375         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3376         default:
3377                 signal_levels |= DP_PRE_EMPHASIS_0;
3378                 break;
3379         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3380                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3381                 break;
3382         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3383                 signal_levels |= DP_PRE_EMPHASIS_6;
3384                 break;
3385         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3386                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3387                 break;
3388         }
3389         return signal_levels;
3390 }
3391
3392 /* Gen6's DP voltage swing and pre-emphasis control */
3393 static uint32_t
3394 gen6_edp_signal_levels(uint8_t train_set)
3395 {
3396         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3397                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3398         switch (signal_levels) {
3399         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3400         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3401                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3402         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3403                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3404         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3405         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3406                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3407         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3408         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3409                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3410         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3411         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3412                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3413         default:
3414                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3415                               "0x%x\n", signal_levels);
3416                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3417         }
3418 }
3419
3420 /* Gen7's DP voltage swing and pre-emphasis control */
3421 static uint32_t
3422 gen7_edp_signal_levels(uint8_t train_set)
3423 {
3424         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3425                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3426         switch (signal_levels) {
3427         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3428                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3429         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3430                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3431         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3432                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3433
3434         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3435                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3436         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3437                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3438
3439         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3440                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3441         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3442                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3443
3444         default:
3445                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3446                               "0x%x\n", signal_levels);
3447                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3448         }
3449 }
3450
/*
 * intel_dp_set_signal_levels - apply the current train_set to the hardware
 *
 * Dispatches to the platform-specific signal level computation, merges the
 * resulting bits into the cached DP port register value under @mask, and
 * writes it out. The branch order matters: DDI is checked first, then
 * CHV/VLV (which program their PHYs directly and return 0), then the
 * gen7/gen6 eDP PORT_A special cases, then gen4.
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev_priv)) {
		signal_levels = ddi_signal_levels(intel_dp);

		/* On GEN9 LP nothing is merged into the DP register. */
		if (IS_GEN9_LP(dev_priv))
			signal_levels = 0;
		else
			mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev_priv) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev_priv) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	/* mask == 0 means the levels are not encoded in the DP register. */
	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3497
3498 void
3499 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3500                                        uint8_t dp_train_pat)
3501 {
3502         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3503         struct drm_i915_private *dev_priv =
3504                 to_i915(intel_dig_port->base.base.dev);
3505
3506         _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3507
3508         I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3509         POSTING_READ(intel_dp->output_reg);
3510 }
3511
3512 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3513 {
3514         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3515         struct drm_device *dev = intel_dig_port->base.base.dev;
3516         struct drm_i915_private *dev_priv = to_i915(dev);
3517         enum port port = intel_dig_port->port;
3518         uint32_t val;
3519
3520         if (!HAS_DDI(dev_priv))
3521                 return;
3522
3523         val = I915_READ(DP_TP_CTL(port));
3524         val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3525         val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3526         I915_WRITE(DP_TP_CTL(port), val);
3527
3528         /*
3529          * On PORT_A we can have only eDP in SST mode. There the only reason
3530          * we need to set idle transmission mode is to work around a HW issue
3531          * where we enable the pipe while not in idle link-training mode.
3532          * In this case there is requirement to wait for a minimum number of
3533          * idle patterns to be sent.
3534          */
3535         if (port == PORT_A)
3536                 return;
3537
3538         if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port),
3539                                     DP_TP_STATUS_IDLE_DONE,
3540                                     DP_TP_STATUS_IDLE_DONE,
3541                                     1))
3542                 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3543 }
3544
/*
 * intel_dp_link_down - take a non-DDI DP port down
 *
 * Switches the port to the idle training pattern, disables the port and
 * audio output, applies the IBX transcoder-A workaround when needed,
 * waits out the panel power-down delay and invalidates the cached
 * active pipe on VLV/CHV. Must not be called on DDI platforms, and the
 * port must currently be enabled (both conditions WARN and bail).
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t DP = intel_dp->DP;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* CPT and gen7 PORT_A use different link-train field encodings. */
	if ((IS_GEN7(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev_priv))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Now actually disable the port and audio. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;

	/* The port is down, so no pipe can be driving it any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		pps_lock(intel_dp);
		intel_dp->active_pipe = INVALID_PIPE;
		pps_unlock(intel_dp);
	}
}
3619
3620 bool
3621 intel_dp_read_dpcd(struct intel_dp *intel_dp)
3622 {
3623         if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
3624                              sizeof(intel_dp->dpcd)) < 0)
3625                 return false; /* aux transfer failed */
3626
3627         DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3628
3629         return intel_dp->dpcd[DP_DPCD_REV] != 0;
3630 }
3631
/*
 * intel_edp_init_dpcd - one-time DPCD initialization for an eDP panel
 *
 * Reads the base DPCD, PSR/PSR2 capabilities, the eDP display control
 * registers and (for eDP 1.4+) the intermediate link rate table, then
 * sets up the sink and common rate tables. Returns false if the base
 * DPCD read fails or the sink reports DPCD revision 0.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	intel_dp_read_desc(intel_dp);

	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
		dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
			DP_NO_AUX_HANDSHAKE_LINK_TRAINING;

	/* Check if the panel supports PSR */
	/* NOTE(review): best-effort read, the return value is not checked. */
	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
			 intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));
	if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
		dev_priv->psr.sink_support = true;
		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
	}

	/* PSR2 requires gen9+ hardware and the sink's PSR2 capability bit. */
	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
		uint8_t frame_sync_cap;

		dev_priv->psr.sink_support = true;
		drm_dp_dpcd_read(&intel_dp->aux,
				 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
				 &frame_sync_cap, 1);
		dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
		/* PSR2 needs frame sync as well */
		dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
		DRM_DEBUG_KMS("PSR2 %s on sink",
			      dev_priv->psr.psr2_support ? "supported" : "not supported");

		if (dev_priv->psr.psr2_support) {
			dev_priv->psr.y_cord_support =
				intel_dp_get_y_cord_status(intel_dp);
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.alpm =
				intel_dp_get_alpm_status(intel_dp);
		}

	}

	/* Read the eDP Display control capabilities registers */
	if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
			     sizeof(intel_dp->edp_dpcd))
		DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
			      intel_dp->edp_dpcd);

	/* Intermediate frequency support */
	if (intel_dp->edp_dpcd[0] >= 0x03) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				sink_rates, sizeof(sink_rates));

		/* The rate table is zero-terminated. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/* Prefer the explicit rate table when the sink provided one. */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	return true;
}
3726
3727
/*
 * intel_dp_get_dpcd - refresh DPCD state for a sink
 *
 * Re-reads the base DPCD, rate tables (external DP only, so cached eDP
 * rates are preserved), sink count and, for branch devices, the
 * downstream port info. Returns false on any aux failure, and also when
 * an external sink reports SINK_COUNT == 0 (dongle with no display).
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	/* Don't clobber cached eDP rates. */
	if (!is_edp(intel_dp)) {
		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
			     &intel_dp->sink_count, 1) < 0)
		return false;

	/*
	 * Sink count can change between short pulse hpd hence
	 * a member variable in intel_dp will track any changes
	 * between short pulse interrupts.
	 */
	intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);

	/*
	 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
	 * a dongle is present but no display. Unless we require to know
	 * if a dongle is present or not, we don't need to update
	 * downstream port information. So, an early return here saves
	 * time from performing other operations which are not required.
	 */
	if (!is_edp(intel_dp) && !intel_dp->sink_count)
		return false;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3774
3775 static bool
3776 intel_dp_can_mst(struct intel_dp *intel_dp)
3777 {
3778         u8 buf[1];
3779
3780         if (!i915.enable_dp_mst)
3781                 return false;
3782
3783         if (!intel_dp->can_mst)
3784                 return false;
3785
3786         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3787                 return false;
3788
3789         if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1) != 1)
3790                 return false;
3791
3792         return buf[0] & DP_MST_CAP;
3793 }
3794
3795 static void
3796 intel_dp_configure_mst(struct intel_dp *intel_dp)
3797 {
3798         if (!i915.enable_dp_mst)
3799                 return;
3800
3801         if (!intel_dp->can_mst)
3802                 return;
3803
3804         intel_dp->is_mst = intel_dp_can_mst(intel_dp);
3805
3806         if (intel_dp->is_mst)
3807                 DRM_DEBUG_KMS("Sink is MST capable\n");
3808         else
3809                 DRM_DEBUG_KMS("Sink is not MST capable\n");
3810
3811         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
3812                                         intel_dp->is_mst);
3813 }
3814
/*
 * intel_dp_sink_crc_stop - stop sink CRC calculation
 *
 * Clears DP_TEST_SINK_START in the sink's TEST_SINK register, then waits
 * (up to 10 vblanks) for the sink's CRC test counter to drain to zero.
 * Always re-enables IPS on the way out (it is disabled by
 * intel_dp_sink_crc_start()). Returns 0 on success, -EIO on aux
 * failures, -ETIMEDOUT if the counter never reaches zero.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;
	int count = 0;
	int attempts = 10;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Poll once per vblank until the sink's test counter is zero. */
	do {
		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		count = buf & DP_TEST_COUNT_MASK;
	} while (--attempts && count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
		ret = -ETIMEDOUT;
	}

 out:
	hsw_enable_ips(intel_crtc);
	return ret;
}
3858
/*
 * intel_dp_sink_crc_start - start sink CRC calculation
 *
 * Verifies the sink supports CRC tests, stops any calculation already in
 * progress, disables IPS (re-enabled by intel_dp_sink_crc_stop()), sets
 * DP_TEST_SINK_START and waits one vblank. Returns 0 on success, -EIO
 * on aux failures, -ENOTTY if the sink lacks CRC support.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* Restart cleanly if a CRC calculation is already running. */
	if (buf & DP_TEST_SINK_START) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		/* Failed to start: undo the IPS disable before bailing. */
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return 0;
}
3893
/*
 * intel_dp_sink_crc - read a frame CRC computed by the sink
 * @intel_dp: port to read from
 * @crc: output buffer, receives 6 bytes starting at DP_TEST_CRC_R_CR
 *
 * Starts sink CRC calculation, waits (up to 6 vblanks) for the sink's
 * test counter to become non-zero, reads the CRC bytes and stops the
 * calculation again. Returns 0 on success, -EIO on aux failures,
 * -ETIMEDOUT if no CRC was produced.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	/* Poll once per vblank until the sink reports a computed CRC. */
	do {
		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

	} while (--attempts && count == 0);

	if (attempts == 0) {
		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto stop;
	}

stop:
	/* Always stop the sink's CRC calculation, even on error paths. */
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
3934
3935 static bool
3936 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3937 {
3938         return drm_dp_dpcd_read(&intel_dp->aux,
3939                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
3940                                        sink_irq_vector, 1) == 1;
3941 }
3942
3943 static bool
3944 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3945 {
3946         int ret;
3947
3948         ret = drm_dp_dpcd_read(&intel_dp->aux,
3949                                              DP_SINK_COUNT_ESI,
3950                                              sink_irq_vector, 14);
3951         if (ret != 14)
3952                 return false;
3953
3954         return true;
3955 }
3956
/*
 * Handle the link training automated test request (DP CTS 1.2,
 * 4.3.1.11): read the requested lane count and link rate from the
 * sink, validate them against our own capabilities, and stash them in
 * intel_dp->compliance for the subsequent compliance modeset.
 *
 * Returns DP_TEST_ACK when the request was valid and captured,
 * DP_TEST_NAK on AUX failure or an unsupported lane count/link rate.
 */
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	int status = 0;
	int min_lane_count = 1;
	int link_rate_index, test_link_rate;
	uint8_t test_lane_count, test_link_bw;

	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		DRM_DEBUG_KMS("Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;
	/* Validate the requested lane count */
	if (test_lane_count < min_lane_count ||
	    test_lane_count > intel_dp->max_link_lane_count)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		DRM_DEBUG_KMS("Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	/*
	 * Validate the requested link rate: it must be one of the rates
	 * common to source and sink (intel_dp_rate_index() returns a
	 * negative value otherwise).
	 */
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
	link_rate_index = intel_dp_rate_index(intel_dp->common_rates,
					      intel_dp->num_common_rates,
					      test_link_rate);
	if (link_rate_index < 0)
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
3999
4000 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4001 {
4002         uint8_t test_pattern;
4003         uint16_t test_misc;
4004         __be16 h_width, v_height;
4005         int status = 0;
4006
4007         /* Read the TEST_PATTERN (DP CTS 3.1.5) */
4008         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_PATTERN,
4009                                   &test_pattern, 1);
4010         if (status <= 0) {
4011                 DRM_DEBUG_KMS("Test pattern read failed\n");
4012                 return DP_TEST_NAK;
4013         }
4014         if (test_pattern != DP_COLOR_RAMP)
4015                 return DP_TEST_NAK;
4016
4017         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
4018                                   &h_width, 2);
4019         if (status <= 0) {
4020                 DRM_DEBUG_KMS("H Width read failed\n");
4021                 return DP_TEST_NAK;
4022         }
4023
4024         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
4025                                   &v_height, 2);
4026         if (status <= 0) {
4027                 DRM_DEBUG_KMS("V Height read failed\n");
4028                 return DP_TEST_NAK;
4029         }
4030
4031         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_MISC0,
4032                                   &test_misc, 1);
4033         if (status <= 0) {
4034                 DRM_DEBUG_KMS("TEST MISC read failed\n");
4035                 return DP_TEST_NAK;
4036         }
4037         if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
4038                 return DP_TEST_NAK;
4039         if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
4040                 return DP_TEST_NAK;
4041         switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
4042         case DP_TEST_BIT_DEPTH_6:
4043                 intel_dp->compliance.test_data.bpc = 6;
4044                 break;
4045         case DP_TEST_BIT_DEPTH_8:
4046                 intel_dp->compliance.test_data.bpc = 8;
4047                 break;
4048         default:
4049                 return DP_TEST_NAK;
4050         }
4051
4052         intel_dp->compliance.test_data.video_pattern = test_pattern;
4053         intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
4054         intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
4055         /* Set test active flag here so userspace doesn't interrupt things */
4056         intel_dp->compliance.test_active = 1;
4057
4058         return DP_TEST_ACK;
4059 }
4060
/*
 * Handle the EDID read automated test (DP CTS 1.2 Core r1.1, 4.2.2.x):
 * pick failsafe vs preferred resolution based on how the preceding
 * EDID read went, and on success write the last EDID block's checksum
 * back to the sink as the CTS requires.
 *
 * Returns DP_TEST_ACK, optionally OR'd with DP_TEST_EDID_CHECKSUM_WRITE
 * when a valid EDID was read and its checksum reported.
 */
static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	uint8_t test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
				      intel_dp->aux.i2c_nack_count,
				      intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (!drm_dp_dpcd_write(&intel_dp->aux,
					DP_TEST_EDID_CHECKSUM,
					&block->checksum,
					1))
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return test_result;
}
4106
4107 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4108 {
4109         uint8_t test_result = DP_TEST_NAK;
4110         return test_result;
4111 }
4112
4113 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4114 {
4115         uint8_t response = DP_TEST_NAK;
4116         uint8_t request = 0;
4117         int status;
4118
4119         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4120         if (status <= 0) {
4121                 DRM_DEBUG_KMS("Could not read test request from sink\n");
4122                 goto update_status;
4123         }
4124
4125         switch (request) {
4126         case DP_TEST_LINK_TRAINING:
4127                 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4128                 response = intel_dp_autotest_link_training(intel_dp);
4129                 break;
4130         case DP_TEST_LINK_VIDEO_PATTERN:
4131                 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4132                 response = intel_dp_autotest_video_pattern(intel_dp);
4133                 break;
4134         case DP_TEST_LINK_EDID_READ:
4135                 DRM_DEBUG_KMS("EDID test requested\n");
4136                 response = intel_dp_autotest_edid(intel_dp);
4137                 break;
4138         case DP_TEST_LINK_PHY_TEST_PATTERN:
4139                 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4140                 response = intel_dp_autotest_phy_pattern(intel_dp);
4141                 break;
4142         default:
4143                 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4144                 break;
4145         }
4146
4147         if (response & DP_TEST_ACK)
4148                 intel_dp->compliance.test_type = request;
4149
4150 update_status:
4151         status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4152         if (status <= 0)
4153                 DRM_DEBUG_KMS("Could not write test response to sink\n");
4154 }
4155
/*
 * Service an MST short pulse: read the ESI block, retrain the link if
 * channel EQ dropped, hand sink events to the MST manager, and keep
 * acking/re-reading ESI until the sink stops reporting handled events.
 *
 * Returns the drm_dp_mst_hpd_irq() result (0 if it handled nothing),
 * or -EINVAL when not in MST mode or when the ESI read fails — in the
 * latter case MST is torn down and a hotplug event is generated.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced events back to the sink,
				 * retrying the 3-byte write a few times */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived meanwhile */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4212
/*
 * Retrain the DP link in place. FIFO underruns are expected while the
 * link is retraining, so underrun reporting is suppressed on the CPU
 * pipe (and the PCH transcoder when one is involved) for the duration,
 * and only re-enabled after a vblank has passed post-training.
 */
static void
intel_dp_retrain_link(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	/* Suppress underruns caused by re-training */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	if (crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), false);

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	/* Keep underrun reporting disabled until things are stable */
	intel_wait_for_vblank(dev_priv, crtc->pipe);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
	if (crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), true);
}
4237
4238 static void
4239 intel_dp_check_link_status(struct intel_dp *intel_dp)
4240 {
4241         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4242         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4243         u8 link_status[DP_LINK_STATUS_SIZE];
4244
4245         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4246
4247         if (!intel_dp_get_link_status(intel_dp, link_status)) {
4248                 DRM_ERROR("Failed to get link status\n");
4249                 return;
4250         }
4251
4252         if (!intel_encoder->base.crtc)
4253                 return;
4254
4255         if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4256                 return;
4257
4258         /* FIXME: we need to synchronize this sort of stuff with hardware
4259          * readout. Currently fast link training doesn't work on boot-up. */
4260         if (!intel_dp->lane_count)
4261                 return;
4262
4263         /* Retrain if Channel EQ or CR not ok */
4264         if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4265                 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4266                               intel_encoder->base.name);
4267
4268                 intel_dp_retrain_link(intel_dp);
4269         }
4270 }
4271
4272 /*
4273  * According to DP spec
4274  * 5.1.2:
4275  *  1. Read DPCD
4276  *  2. Configure link according to Receiver Capabilities
4277  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4278  *  4. Check link status on receipt of hot-plug interrupt
4279  *
4280  * intel_dp_short_pulse -  handles short pulse interrupts
4281  * when full detection is not required.
4282  * Returns %true if short pulse is handled and full detection
4283  * is NOT required and %false otherwise.
4284  */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector = 0;
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
	    sink_irq_vector != 0) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Check link health under the connection lock; retrains if needed */
	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	intel_dp_check_link_status(intel_dp);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(intel_encoder->base.dev);
	}

	return true;
}
4339
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status from the DPCD alone: read the DPCD, then
 * decide based on panel type, branch-device presence, SINK_COUNT, MST
 * capability, and finally a gentle DDC probe. The checks are ordered
 * from cheapest/most reliable to least; do not reorder (the DDC probe
 * has bus side effects).
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (lspcon->active)
		lspcon_resume(lspcon);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* eDP panels are always connected once the DPCD read worked */
	if (is_edp(intel_dp))
		return connector_status_connected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {

		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD < 1.1: only the coarse downstream port type is known */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4394
4395 static enum drm_connector_status
4396 edp_detect(struct intel_dp *intel_dp)
4397 {
4398         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4399         struct drm_i915_private *dev_priv = to_i915(dev);
4400         enum drm_connector_status status;
4401
4402         status = intel_panel_detect(dev_priv);
4403         if (status == connector_status_unknown)
4404                 status = connector_status_connected;
4405
4406         return status;
4407 }
4408
/*
 * Live HPD state for @port on IBX PCH, read from SDEISR. Port A has no
 * HPD line here and is reported as always connected.
 */
static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_A:
		return true;
	case PORT_B:
		bit = SDE_PORTB_HOTPLUG;
		break;
	case PORT_C:
		bit = SDE_PORTC_HOTPLUG;
		break;
	case PORT_D:
		bit = SDE_PORTD_HOTPLUG;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(SDEISR) & bit;
}
4433
/*
 * Live HPD state for @port on CPT/SPT PCH, read from SDEISR. Port A
 * has no HPD line here and is reported as always connected; PORT_E
 * uses the SPT-specific bit.
 */
static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_A:
		return true;
	case PORT_B:
		bit = SDE_PORTB_HOTPLUG_CPT;
		break;
	case PORT_C:
		bit = SDE_PORTC_HOTPLUG_CPT;
		break;
	case PORT_D:
		bit = SDE_PORTD_HOTPLUG_CPT;
		break;
	case PORT_E:
		bit = SDE_PORTE_HOTPLUG_SPT;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(SDEISR) & bit;
}
4461
/* Live HPD state for @port on G4x, read from PORT_HOTPLUG_STAT. */
static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
4484
/*
 * Live HPD state for @port on GM45, read from PORT_HOTPLUG_STAT.
 * GM45 uses different live-status bit positions than plain G4x.
 */
static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
					struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
4507
/*
 * Live HPD state on Broxton, read from GEN8_DE_PORT_ISR. The port is
 * derived from the encoder's HPD pin rather than taken from
 * intel_dig_port directly.
 */
static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *intel_dig_port)
{
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum port port;
	u32 bit;

	intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
	switch (port) {
	case PORT_A:
		bit = BXT_DE_PORT_HP_DDIA;
		break;
	case PORT_B:
		bit = BXT_DE_PORT_HP_DDIB;
		break;
	case PORT_C:
		bit = BXT_DE_PORT_HP_DDIC;
		break;
	default:
		MISSING_CASE(port);
		return false;
	}

	return I915_READ(GEN8_DE_PORT_ISR) & bit;
}
4533
/**
 * intel_digital_port_connected - is the specified port connected?
 * @dev_priv: i915 private structure
 * @port: the port to test
 *
 * Dispatches to the platform-specific live-status helper.
 *
 * Return: %true if @port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
				  struct intel_digital_port *port)
{
	if (HAS_PCH_IBX(dev_priv))
		return ibx_digital_port_connected(dev_priv, port);
	else if (HAS_PCH_SPLIT(dev_priv))
		return cpt_digital_port_connected(dev_priv, port);
	else if (IS_GEN9_LP(dev_priv))
		return bxt_digital_port_connected(dev_priv, port);
	else if (IS_GM45(dev_priv))
		return gm45_digital_port_connected(dev_priv, port);
	else
		return g4x_digital_port_connected(dev_priv, port);
}
4555
4556 static struct edid *
4557 intel_dp_get_edid(struct intel_dp *intel_dp)
4558 {
4559         struct intel_connector *intel_connector = intel_dp->attached_connector;
4560
4561         /* use cached edid if we have one */
4562         if (intel_connector->edid) {
4563                 /* invalid edid */
4564                 if (IS_ERR(intel_connector->edid))
4565                         return NULL;
4566
4567                 return drm_edid_duplicate(intel_connector->edid);
4568         } else
4569                 return drm_get_edid(&intel_connector->base,
4570                                     &intel_dp->aux.ddc);
4571 }
4572
4573 static void
4574 intel_dp_set_edid(struct intel_dp *intel_dp)
4575 {
4576         struct intel_connector *intel_connector = intel_dp->attached_connector;
4577         struct edid *edid;
4578
4579         intel_dp_unset_edid(intel_dp);
4580         edid = intel_dp_get_edid(intel_dp);
4581         intel_connector->detect_edid = edid;
4582
4583         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4584                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4585         else
4586                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4587 }
4588
4589 static void
4590 intel_dp_unset_edid(struct intel_dp *intel_dp)
4591 {
4592         struct intel_connector *intel_connector = intel_dp->attached_connector;
4593
4594         kfree(intel_connector->detect_edid);
4595         intel_connector->detect_edid = NULL;
4596
4597         intel_dp->has_audio = false;
4598 }
4599
/*
 * Full (long pulse) detection: establish connector status, re-read
 * sink capabilities, reconfigure MST, refresh the EDID cache and
 * service any pending sink interrupts. Grabs the AUX power domain for
 * the duration. Sets intel_dp->detect_done when a full probe ran, so
 * a following ->detect can reuse the result.
 */
static enum drm_connector_status
intel_dp_long_pulse(struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	u8 sink_irq_vector = 0;

	intel_display_power_get(to_i915(dev), intel_dp->aux_power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(to_i915(dev),
					      dp_to_dig_port(intel_dp)))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

		/* Tear down MST if the device went away mid-session */
		if (intel_dp->is_mst) {
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst,
				      intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DP;

	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

	/* Re-derive link limits only after an explicit reset request */
	if (intel_dp->reset_link_params) {
		/* Initial max link lane count */
		intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

		/* Initial max link rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	intel_dp_read_desc(intel_dp);

	intel_dp_configure_mst(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	} else if (connector->status == connector_status_connected) {
		/*
		 * If display was connected already and is still connected
		 * check links status, there has been known issues of
		 * link loss triggering long pulse!!!!
		 */
		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
		intel_dp_check_link_status(intel_dp);
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
		goto out;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (is_edp(intel_dp) || intel_connector->detect_edid)
		status = connector_status_connected;
	intel_dp->detect_done = true;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
	    sink_irq_vector != 0) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	intel_display_power_put(to_i915(dev), intel_dp->aux_power_domain);
	return status;
}
4715
/*
 * ->detect connector hook. If a preceding hotplug already performed a
 * full detect (intel_dp->detect_done set by intel_dp_long_pulse()),
 * reuse the stored connector->status instead of probing again. The
 * flag is one-shot and is cleared on every call.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	enum drm_connector_status status = connector->status;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);

	/* If full detect is not performed yet, do a full detect */
	if (!intel_dp->detect_done)
		status = intel_dp_long_pulse(intel_dp->attached_connector);

	intel_dp->detect_done = false;

	return status;
}
4733
4734 static void
4735 intel_dp_force(struct drm_connector *connector)
4736 {
4737         struct intel_dp *intel_dp = intel_attached_dp(connector);
4738         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4739         struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4740
4741         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4742                       connector->base.id, connector->name);
4743         intel_dp_unset_edid(intel_dp);
4744
4745         if (connector->status != connector_status_connected)
4746                 return;
4747
4748         intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
4749
4750         intel_dp_set_edid(intel_dp);
4751
4752         intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
4753
4754         if (intel_encoder->type != INTEL_OUTPUT_EDP)
4755                 intel_encoder->type = INTEL_OUTPUT_DP;
4756 }
4757
4758 static int intel_dp_get_modes(struct drm_connector *connector)
4759 {
4760         struct intel_connector *intel_connector = to_intel_connector(connector);
4761         struct edid *edid;
4762
4763         edid = intel_connector->detect_edid;
4764         if (edid) {
4765                 int ret = intel_connector_update_modes(connector, edid);
4766                 if (ret)
4767                         return ret;
4768         }
4769
4770         /* if eDP has no EDID, fall back to fixed mode */
4771         if (is_edp(intel_attached_dp(connector)) &&
4772             intel_connector->panel.fixed_mode) {
4773                 struct drm_display_mode *mode;
4774
4775                 mode = drm_mode_duplicate(connector->dev,
4776                                           intel_connector->panel.fixed_mode);
4777                 if (mode) {
4778                         drm_mode_probed_add(connector, mode);
4779                         return 1;
4780                 }
4781         }
4782
4783         return 0;
4784 }
4785
4786 static bool
4787 intel_dp_detect_audio(struct drm_connector *connector)
4788 {
4789         bool has_audio = false;
4790         struct edid *edid;
4791
4792         edid = to_intel_connector(connector)->detect_edid;
4793         if (edid)
4794                 has_audio = drm_detect_monitor_audio(edid);
4795
4796         return has_audio;
4797 }
4798
/*
 * .set_property connector hook: handle the connector properties that
 * need encoder-side state updates — force audio, broadcast RGB range
 * and (eDP only) the panel scaling mode. Any accepted change on an
 * active CRTC triggers a modeset restore so it takes effect.
 *
 * Returns 0 on success (including no-op changes), -EINVAL for unknown
 * properties or unsupported values.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
                      struct drm_property *property,
                      uint64_t val)
{
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
        int ret;

        /* Record the new value on the property object first */
        ret = drm_object_property_set_value(&connector->base, property, val);
        if (ret)
                return ret;

        if (property == dev_priv->force_audio_property) {
                int i = val;
                bool has_audio;

                /* No change requested */
                if (i == intel_dp->force_audio)
                        return 0;

                intel_dp->force_audio = i;

                /* AUTO re-probes the EDID for audio capability */
                if (i == HDMI_AUDIO_AUTO)
                        has_audio = intel_dp_detect_audio(connector);
                else
                        has_audio = (i == HDMI_AUDIO_ON);

                if (has_audio == intel_dp->has_audio)
                        return 0;

                intel_dp->has_audio = has_audio;
                goto done;
        }

        if (property == dev_priv->broadcast_rgb_property) {
                bool old_auto = intel_dp->color_range_auto;
                bool old_range = intel_dp->limited_color_range;

                switch (val) {
                case INTEL_BROADCAST_RGB_AUTO:
                        intel_dp->color_range_auto = true;
                        break;
                case INTEL_BROADCAST_RGB_FULL:
                        intel_dp->color_range_auto = false;
                        intel_dp->limited_color_range = false;
                        break;
                case INTEL_BROADCAST_RGB_LIMITED:
                        intel_dp->color_range_auto = false;
                        intel_dp->limited_color_range = true;
                        break;
                default:
                        return -EINVAL;
                }

                /* Skip the modeset when nothing actually changed */
                if (old_auto == intel_dp->color_range_auto &&
                    old_range == intel_dp->limited_color_range)
                        return 0;

                goto done;
        }

        if (is_edp(intel_dp) &&
            property == connector->dev->mode_config.scaling_mode_property) {
                if (val == DRM_MODE_SCALE_NONE) {
                        DRM_DEBUG_KMS("no scaling not supported\n");
                        return -EINVAL;
                }
                /* GMCH panel fitters cannot center */
                if (HAS_GMCH_DISPLAY(dev_priv) &&
                    val == DRM_MODE_SCALE_CENTER) {
                        DRM_DEBUG_KMS("centering not supported\n");
                        return -EINVAL;
                }

                if (intel_connector->panel.fitting_mode == val) {
                        /* the eDP scaling property is not changed */
                        return 0;
                }
                intel_connector->panel.fitting_mode = val;

                goto done;
        }

        return -EINVAL;

done:
        /* Apply the change immediately when a CRTC is driving us */
        if (intel_encoder->base.crtc)
                intel_crtc_restore_mode(intel_encoder->base.crtc);

        return 0;
}
4891
4892 static int
4893 intel_dp_connector_register(struct drm_connector *connector)
4894 {
4895         struct intel_dp *intel_dp = intel_attached_dp(connector);
4896         int ret;
4897
4898         ret = intel_connector_register(connector);
4899         if (ret)
4900                 return ret;
4901
4902         i915_debugfs_connector_add(connector);
4903
4904         DRM_DEBUG_KMS("registering %s bus for %s\n",
4905                       intel_dp->aux.name, connector->kdev->kobj.name);
4906
4907         intel_dp->aux.dev = connector->kdev;
4908         return drm_dp_aux_register(&intel_dp->aux);
4909 }
4910
4911 static void
4912 intel_dp_connector_unregister(struct drm_connector *connector)
4913 {
4914         drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
4915         intel_connector_unregister(connector);
4916 }
4917
4918 static void
4919 intel_dp_connector_destroy(struct drm_connector *connector)
4920 {
4921         struct intel_connector *intel_connector = to_intel_connector(connector);
4922
4923         kfree(intel_connector->detect_edid);
4924
4925         if (!IS_ERR_OR_NULL(intel_connector->edid))
4926                 kfree(intel_connector->edid);
4927
4928         /* Can't call is_edp() since the encoder may have been destroyed
4929          * already. */
4930         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4931                 intel_panel_fini(&intel_connector->panel);
4932
4933         drm_connector_cleanup(connector);
4934         kfree(connector);
4935 }
4936
/*
 * Encoder .destroy hook: tear down MST state, force eDP VDD off (the
 * delayed vdd-off work may still hold it on), drop the reboot notifier
 * and free the AUX channel before releasing the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_dp *intel_dp = &intel_dig_port->dp;

        intel_dp_mst_encoder_cleanup(intel_dig_port);
        if (is_edp(intel_dp)) {
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                /*
                 * vdd might still be enabled due to the delayed vdd off.
                 * Make sure vdd is actually turned off here.
                 */
                pps_lock(intel_dp);
                edp_panel_vdd_off_sync(intel_dp);
                pps_unlock(intel_dp);

                if (intel_dp->edp_notifier.notifier_call) {
                        unregister_reboot_notifier(&intel_dp->edp_notifier);
                        intel_dp->edp_notifier.notifier_call = NULL;
                }
        }

        intel_dp_aux_fini(intel_dp);

        drm_encoder_cleanup(encoder);
        kfree(intel_dig_port);
}
4964
/*
 * Suspend hook: for eDP, cancel the delayed vdd-off work and force VDD
 * off synchronously under the pps lock so we don't suspend with VDD
 * still held. No-op for non-eDP ports.
 */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

        if (!is_edp(intel_dp))
                return;

        /*
         * vdd might still be enabled due to the delayed vdd off.
         * Make sure vdd is actually turned off here.
         */
        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
        pps_lock(intel_dp);
        edp_panel_vdd_off_sync(intel_dp);
        pps_unlock(intel_dp);
}
4981
4982 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4983 {
4984         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4985         struct drm_device *dev = intel_dig_port->base.base.dev;
4986         struct drm_i915_private *dev_priv = to_i915(dev);
4987
4988         lockdep_assert_held(&dev_priv->pps_mutex);
4989
4990         if (!edp_have_panel_vdd(intel_dp))
4991                 return;
4992
4993         /*
4994          * The VDD bit needs a power domain reference, so if the bit is
4995          * already enabled when we boot or resume, grab this reference and
4996          * schedule a vdd off, so we don't hold on to the reference
4997          * indefinitely.
4998          */
4999         DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5000         intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
5001
5002         edp_panel_vdd_schedule_off(intel_dp);
5003 }
5004
5005 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
5006 {
5007         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
5008
5009         if ((intel_dp->DP & DP_PORT_EN) == 0)
5010                 return INVALID_PIPE;
5011
5012         if (IS_CHERRYVIEW(dev_priv))
5013                 return DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5014         else
5015                 return PORT_TO_PIPE(intel_dp->DP);
5016 }
5017
/*
 * Encoder .reset hook (called on driver load/resume): re-read the port
 * register state, resume an active LSPCON, and re-sanitize the eDP
 * panel power sequencer which the BIOS may have reprogrammed.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

        /* DDI platforms don't track the port state in intel_dp->DP */
        if (!HAS_DDI(dev_priv))
                intel_dp->DP = I915_READ(intel_dp->output_reg);

        if (lspcon->active)
                lspcon_resume(lspcon);

        /* Force link parameter re-computation on the next detect */
        intel_dp->reset_link_params = true;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                intel_dp->active_pipe = vlv_active_pipe(intel_dp);

        if (is_edp(intel_dp)) {
                /* Reinit the power sequencer, in case BIOS did something with it. */
                intel_dp_pps_init(encoder->dev, intel_dp);
                intel_edp_panel_vdd_sanitize(intel_dp);
        }

        pps_unlock(intel_dp);
}
5045
/* Connector ops shared by all i915 DP and eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_dp_detect,
        .force = intel_dp_force,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_dp_set_property,
        .atomic_get_property = intel_connector_atomic_get_property,
        .late_register = intel_dp_connector_register,
        .early_unregister = intel_dp_connector_unregister,
        .destroy = intel_dp_connector_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
5059
/* Probe helpers: mode enumeration and validation for DP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
        .get_modes = intel_dp_get_modes,
        .mode_valid = intel_dp_mode_valid,
};
5064
/* Encoder ops for the DP encoder (reset on load/resume, full teardown). */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
        .reset = intel_dp_encoder_reset,
        .destroy = intel_dp_encoder_destroy,
};
5069
/*
 * Handle a hotplug IRQ pulse on a DP digital port.
 *
 * Long pulses are not serviced here: the function only marks that a
 * full detect is needed (reset_link_params set, detect_done cleared)
 * and returns IRQ_NONE so further processing happens elsewhere. Long
 * pulses on eDP are ignored entirely to avoid a vdd on/off feedback
 * loop. Short pulses are serviced directly with the AUX power domain
 * held: MST sinks via intel_dp_check_mst_status(), SST sinks via
 * intel_dp_short_pulse(). Returns IRQ_HANDLED when the pulse was fully
 * serviced here, IRQ_NONE otherwise.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum irqreturn ret = IRQ_NONE;

        /* Reclassify unknown/DP-MST encoder types back to plain DP */
        if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
            intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
                intel_dig_port->base.type = INTEL_OUTPUT_DP;

        if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
                /*
                 * vdd off can generate a long pulse on eDP which
                 * would require vdd on to handle it, and thus we
                 * would end up in an endless cycle of
                 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
                 */
                DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
                              port_name(intel_dig_port->port));
                return IRQ_HANDLED;
        }

        DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
                      port_name(intel_dig_port->port),
                      long_hpd ? "long" : "short");

        if (long_hpd) {
                /* Request a full detect on the next ->detect() call */
                intel_dp->reset_link_params = true;
                intel_dp->detect_done = false;
                return IRQ_NONE;
        }

        intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

        if (intel_dp->is_mst) {
                if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
                        /*
                         * If we were in MST mode, and device is not
                         * there, get out of MST mode
                         */
                        DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
                                      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
                        intel_dp->is_mst = false;
                        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
                                                        intel_dp->is_mst);
                        intel_dp->detect_done = false;
                        goto put_power;
                }
        }

        if (!intel_dp->is_mst) {
                /* SST short pulse failed: fall back to a full detect */
                if (!intel_dp_short_pulse(intel_dp)) {
                        intel_dp->detect_done = false;
                        goto put_power;
                }
        }

        ret = IRQ_HANDLED;

put_power:
        intel_display_power_put(dev_priv, intel_dp->aux_power_domain);

        return ret;
}
5136
5137 /* check the VBT to see whether the eDP is on another port */
5138 bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
5139 {
5140         /*
5141          * eDP not supported on g4x. so bail out early just
5142          * for a bit extra safety in case the VBT is bonkers.
5143          */
5144         if (INTEL_GEN(dev_priv) < 5)
5145                 return false;
5146
5147         if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
5148                 return true;
5149
5150         return intel_bios_is_port_edp(dev_priv, port);
5151 }
5152
5153 void
5154 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5155 {
5156         struct intel_connector *intel_connector = to_intel_connector(connector);
5157
5158         intel_attach_force_audio_property(connector);
5159         intel_attach_broadcast_rgb_property(connector);
5160         intel_dp->color_range_auto = true;
5161
5162         if (is_edp(intel_dp)) {
5163                 drm_mode_create_scaling_mode_property(connector->dev);
5164                 drm_object_attach_property(
5165                         &connector->base,
5166                         connector->dev->mode_config.scaling_mode_property,
5167                         DRM_MODE_SCALE_ASPECT);
5168                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5169         }
5170 }
5171
5172 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5173 {
5174         intel_dp->panel_power_off_time = ktime_get_boottime();
5175         intel_dp->last_power_on = jiffies;
5176         intel_dp->last_backlight_off = jiffies;
5177 }
5178
/*
 * Read the current panel power sequencer delays out of the PPS
 * registers into @seq. Values stay in the hardware's 100us units; on
 * gen9 LP the power cycle delay lives in pp_ctrl (1-based, 1s units)
 * instead of a separate divider register and is converted here.
 */
static void
intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
                           struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
        u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
        struct pps_registers regs;

        intel_pps_get_registers(dev_priv, intel_dp, &regs);

        /* Workaround: Need to write PP_CONTROL with the unlock key as
         * the very first thing. */
        pp_ctl = ironlake_get_pp_control(intel_dp);

        pp_on = I915_READ(regs.pp_on);
        pp_off = I915_READ(regs.pp_off);
        if (!IS_GEN9_LP(dev_priv)) {
                /* Commit the unlock key, then read the divider register */
                I915_WRITE(regs.pp_ctrl, pp_ctl);
                pp_div = I915_READ(regs.pp_div);
        }

        /* Pull timing values out of registers */
        seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
                     PANEL_POWER_UP_DELAY_SHIFT;

        seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
                  PANEL_LIGHT_ON_DELAY_SHIFT;

        seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
                  PANEL_LIGHT_OFF_DELAY_SHIFT;

        seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
                   PANEL_POWER_DOWN_DELAY_SHIFT;

        if (IS_GEN9_LP(dev_priv)) {
                /* Field is 1-based in units of 1s; convert to 100us units */
                u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
                        BXT_POWER_CYCLE_DELAY_SHIFT;
                if (tmp > 0)
                        seq->t11_t12 = (tmp - 1) * 1000;
                else
                        seq->t11_t12 = 0;
        } else {
                seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
                       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
        }
}
5224
5225 static void
5226 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
5227 {
5228         DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5229                       state_name,
5230                       seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
5231 }
5232
5233 static void
5234 intel_pps_verify_state(struct drm_i915_private *dev_priv,
5235                        struct intel_dp *intel_dp)
5236 {
5237         struct edp_power_seq hw;
5238         struct edp_power_seq *sw = &intel_dp->pps_delays;
5239
5240         intel_pps_readout_hw_state(dev_priv, intel_dp, &hw);
5241
5242         if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
5243             hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
5244                 DRM_ERROR("PPS state mismatch\n");
5245                 intel_pps_dump_state("sw", sw);
5246                 intel_pps_dump_state("hw", &hw);
5247         }
5248 }
5249
/*
 * Compute the final panel power sequencing delays: take the max of the
 * current register values and the VBT, falling back to the eDP spec
 * limits when both are zero, then derive the millisecond-granularity
 * delays used for the driver's manual waits. No-op when already
 * initialized (final->t11_t12 != 0). Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct edp_power_seq cur, vbt, spec,
                *final = &intel_dp->pps_delays;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* already initialized? */
        if (final->t11_t12 != 0)
                return;

        intel_pps_readout_hw_state(dev_priv, intel_dp, &cur);

        intel_pps_dump_state("cur", &cur);

        vbt = dev_priv->vbt.edp.pps;

        /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
         * our hw here, which are all in 100usec. */
        spec.t1_t3 = 210 * 10;
        spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
        spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
        spec.t10 = 500 * 10;
        /* This one is special and actually in units of 100ms, but zero
         * based in the hw (so we need to add 100 ms). But the sw vbt
         * table multiplies it with 1000 to make it in units of 100usec,
         * too. */
        spec.t11_t12 = (510 + 100) * 10;

        intel_pps_dump_state("vbt", &vbt);

        /* Use the max of the register settings and vbt. If both are
         * unset, fall back to the spec limits. */
#define assign_final(field)     final->field = (max(cur.field, vbt.field) == 0 ? \
                                       spec.field : \
                                       max(cur.field, vbt.field))
        assign_final(t1_t3);
        assign_final(t8);
        assign_final(t9);
        assign_final(t10);
        assign_final(t11_t12);
#undef assign_final

        /* Convert 100us hw units to ms, rounding up */
#define get_delay(field)        (DIV_ROUND_UP(final->field, 10))
        intel_dp->panel_power_up_delay = get_delay(t1_t3);
        intel_dp->backlight_on_delay = get_delay(t8);
        intel_dp->backlight_off_delay = get_delay(t9);
        intel_dp->panel_power_down_delay = get_delay(t10);
        intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

        DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
                      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
                      intel_dp->panel_power_cycle_delay);

        DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
                      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

        /*
         * We override the HW backlight delays to 1 because we do manual waits
         * on them. For T8, even BSpec recommends doing it. For T9, if we
         * don't do this, we'll end up waiting for the backlight off delay
         * twice: once when we do the manual sleep, and once when we disable
         * the panel and wait for the PP_STATUS bit to become zero.
         */
        final->t8 = 1;
        final->t9 = 1;
}
5321
/*
 * Program the panel power sequencer registers (PP_ON/PP_OFF, the
 * divider/power-cycle field and the port select bits) from the
 * precomputed pps_delays. With @force_disable_vdd, first clear a
 * BIOS-left EDP_FORCE_VDD so power domain tracking stays consistent.
 * Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                              struct intel_dp *intel_dp,
                                              bool force_disable_vdd)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pp_on, pp_off, pp_div, port_sel = 0;
        int div = dev_priv->rawclk_freq / 1000;
        struct pps_registers regs;
        enum port port = dp_to_dig_port(intel_dp)->port;
        const struct edp_power_seq *seq = &intel_dp->pps_delays;

        lockdep_assert_held(&dev_priv->pps_mutex);

        intel_pps_get_registers(dev_priv, intel_dp, &regs);

        /*
         * On some VLV machines the BIOS can leave the VDD
         * enabled even on power sequencers which aren't
         * hooked up to any port. This would mess up the
         * power domain tracking the first time we pick
         * one of these power sequencers for use since
         * edp_panel_vdd_on() would notice that the VDD was
         * already on and therefore wouldn't grab the power
         * domain reference. Disable VDD first to avoid this.
         * This also avoids spuriously turning the VDD on as
         * soon as the new power sequencer gets initialized.
         */
        if (force_disable_vdd) {
                u32 pp = ironlake_get_pp_control(intel_dp);

                WARN(pp & PANEL_POWER_ON, "Panel power already on\n");

                if (pp & EDP_FORCE_VDD)
                        DRM_DEBUG_KMS("VDD already on, disabling first\n");

                pp &= ~EDP_FORCE_VDD;

                I915_WRITE(regs.pp_ctrl, pp);
        }

        pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
                (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
        pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
                 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
        if (IS_GEN9_LP(dev_priv)) {
                /* gen9 LP keeps the power cycle delay in pp_ctrl */
                pp_div = I915_READ(regs.pp_ctrl);
                pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
                pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
                                << BXT_POWER_CYCLE_DELAY_SHIFT);
        } else {
                pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
                pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
                                << PANEL_POWER_CYCLE_DELAY_SHIFT);
        }

        /* Haswell doesn't have any port selection bits for the panel
         * power sequencer any more. */
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                port_sel = PANEL_PORT_SELECT_VLV(port);
        } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
                if (port == PORT_A)
                        port_sel = PANEL_PORT_SELECT_DPA;
                else
                        port_sel = PANEL_PORT_SELECT_DPD;
        }

        pp_on |= port_sel;

        I915_WRITE(regs.pp_on, pp_on);
        I915_WRITE(regs.pp_off, pp_off);
        if (IS_GEN9_LP(dev_priv))
                I915_WRITE(regs.pp_ctrl, pp_div);
        else
                I915_WRITE(regs.pp_div, pp_div);

        DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
                      I915_READ(regs.pp_on),
                      I915_READ(regs.pp_off),
                      IS_GEN9_LP(dev_priv) ?
                      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
                      I915_READ(regs.pp_div));
}
5407
5408 static void intel_dp_pps_init(struct drm_device *dev,
5409                               struct intel_dp *intel_dp)
5410 {
5411         struct drm_i915_private *dev_priv = to_i915(dev);
5412
5413         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5414                 vlv_initial_power_sequencer_setup(intel_dp);
5415         } else {
5416                 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5417                 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
5418         }
5419 }
5420
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev_priv: i915 device
 * @crtc_state: a pointer to the active intel_crtc_state
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
                                    struct intel_crtc_state *crtc_state,
                                    int refresh_rate)
{
        struct intel_encoder *encoder;
        struct intel_digital_port *dig_port = NULL;
        struct intel_dp *intel_dp = dev_priv->drrs.dp;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
        enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

        if (refresh_rate <= 0) {
                DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
                return;
        }

        if (intel_dp == NULL) {
                DRM_DEBUG_KMS("DRRS not supported.\n");
                return;
        }

        /*
         * FIXME: This needs proper synchronization with psr state for some
         * platforms that cannot have PSR and DRRS enabled at the same time.
         */

        dig_port = dp_to_dig_port(intel_dp);
        encoder = &dig_port->base;
        intel_crtc = to_intel_crtc(encoder->base.crtc);

        if (!intel_crtc) {
                DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
                return;
        }

        if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
                DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
                return;
        }

        /* Requested rate matching the panel's downclock mode means low RR */
        if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
                        refresh_rate)
                index = DRRS_LOW_RR;

        if (index == dev_priv->drrs.refresh_rate_type) {
                DRM_DEBUG_KMS(
                        "DRRS requested for previously set RR...ignoring\n");
                return;
        }

        if (!crtc_state->base.active) {
                DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
                return;
        }

        if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
                /* gen8+ (not CHV): switch by reprogramming the link M/N values */
                switch (index) {
                case DRRS_HIGH_RR:
                        intel_dp_set_m_n(intel_crtc, M1_N1);
                        break;
                case DRRS_LOW_RR:
                        intel_dp_set_m_n(intel_crtc, M2_N2);
                        break;
                case DRRS_MAX_RR:
                default:
                        DRM_ERROR("Unsupported refreshrate type\n");
                }
        } else if (INTEL_GEN(dev_priv) > 6) {
                /* gen7/VLV/CHV: toggle the RR mode switch bit in PIPECONF */
                i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
                u32 val;

                val = I915_READ(reg);
                if (index > DRRS_HIGH_RR) {
                        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                                val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
                        else
                                val |= PIPECONF_EDP_RR_MODE_SWITCH;
                } else {
                        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                                val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
                        else
                                val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
                }
                I915_WRITE(reg, val);
        }

        dev_priv->drrs.refresh_rate_type = index;

        DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5523
5524 /**
5525  * intel_edp_drrs_enable - init drrs struct if supported
5526  * @intel_dp: DP struct
5527  * @crtc_state: A pointer to the active crtc state.
5528  *
5529  * Initializes frontbuffer_bits and drrs.dp
5530  */
5531 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
5532                            struct intel_crtc_state *crtc_state)
5533 {
5534         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5535         struct drm_i915_private *dev_priv = to_i915(dev);
5536
5537         if (!crtc_state->has_drrs) {
5538                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5539                 return;
5540         }
5541
5542         mutex_lock(&dev_priv->drrs.mutex);
5543         if (WARN_ON(dev_priv->drrs.dp)) {
5544                 DRM_ERROR("DRRS already enabled\n");
5545                 goto unlock;
5546         }
5547
5548         dev_priv->drrs.busy_frontbuffer_bits = 0;
5549
5550         dev_priv->drrs.dp = intel_dp;
5551
5552 unlock:
5553         mutex_unlock(&dev_priv->drrs.mutex);
5554 }
5555
5556 /**
5557  * intel_edp_drrs_disable - Disable DRRS
5558  * @intel_dp: DP struct
5559  * @old_crtc_state: Pointer to old crtc_state.
5560  *
5561  */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
                            struct intel_crtc_state *old_crtc_state)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!old_crtc_state->has_drrs)
                return;

        mutex_lock(&dev_priv->drrs.mutex);
        if (!dev_priv->drrs.dp) {
                /* DRRS was never enabled (or already disabled); nothing to do. */
                mutex_unlock(&dev_priv->drrs.mutex);
                return;
        }

        /* Restore the panel's fixed (high) refresh rate before tearing down. */
        if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
                intel_dp_set_drrs_state(dev_priv, old_crtc_state,
                        intel_dp->attached_connector->panel.fixed_mode->vrefresh);

        dev_priv->drrs.dp = NULL;
        mutex_unlock(&dev_priv->drrs.mutex);

        /*
         * Cancel outside the mutex: the downclock work itself takes
         * drrs.mutex, so a _sync cancel under the lock would deadlock.
         */
        cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5586
/*
 * Delayed work that drops the eDP panel to its downclocked refresh rate
 * after the screen has stayed idle for the full timeout (scheduled from
 * intel_edp_drrs_flush()).
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), drrs.work.work);
        struct intel_dp *intel_dp;

        mutex_lock(&dev_priv->drrs.mutex);

        intel_dp = dev_priv->drrs.dp;

        /* DRRS may have been disabled since this work was scheduled. */
        if (!intel_dp)
                goto unlock;

        /*
         * The delayed work can race with an invalidate hence we need to
         * recheck.
         */

        if (dev_priv->drrs.busy_frontbuffer_bits)
                goto unlock;

        /* Screen is idle: switch to the panel's downclocked refresh rate. */
        if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
                struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

                intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
                        intel_dp->attached_connector->panel.downclock_mode->vrefresh);
        }

unlock:
        mutex_unlock(&dev_priv->drrs.mutex);
}
5618
5619 /**
5620  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5621  * @dev_priv: i915 device
5622  * @frontbuffer_bits: frontbuffer plane tracking bits
5623  *
 * This function gets called every time rendering on the given planes starts.
5625  * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5626  *
5627  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5628  */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
                               unsigned int frontbuffer_bits)
{
        struct drm_crtc *crtc;
        enum pipe pipe;

        if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
                return;

        /* A pending downclock is stale now that the screen is busy again. */
        cancel_delayed_work(&dev_priv->drrs.work);

        mutex_lock(&dev_priv->drrs.mutex);
        if (!dev_priv->drrs.dp) {
                mutex_unlock(&dev_priv->drrs.mutex);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        /* Only frontbuffer bits for the DRRS pipe are relevant here. */
        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
        dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

        /* invalidate means busy screen hence upclock */
        if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
                intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
                        dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

        mutex_unlock(&dev_priv->drrs.mutex);
}
5659
5660 /**
5661  * intel_edp_drrs_flush - Restart Idleness DRRS
5662  * @dev_priv: i915 device
5663  * @frontbuffer_bits: frontbuffer plane tracking bits
5664  *
5665  * This function gets called every time rendering on the given planes has
5666  * completed or flip on a crtc is completed. So DRRS should be upclocked
5667  * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5668  * if no other planes are dirty.
5669  *
5670  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5671  */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
                          unsigned int frontbuffer_bits)
{
        struct drm_crtc *crtc;
        enum pipe pipe;

        if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
                return;

        /* Restart the idleness timer; it is re-armed below if all fbs are idle. */
        cancel_delayed_work(&dev_priv->drrs.work);

        mutex_lock(&dev_priv->drrs.mutex);
        if (!dev_priv->drrs.dp) {
                mutex_unlock(&dev_priv->drrs.mutex);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        /* Only frontbuffer bits for the DRRS pipe are relevant here. */
        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
        dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

        /* flush means busy screen hence upclock */
        if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
                intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
                                dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

        /*
         * flush also means no more activity hence schedule downclock, if all
         * other fbs are quiescent too
         */
        if (!dev_priv->drrs.busy_frontbuffer_bits)
                schedule_delayed_work(&dev_priv->drrs.work,
                                msecs_to_jiffies(1000));
        mutex_unlock(&dev_priv->drrs.mutex);
}
5709
5710 /**
5711  * DOC: Display Refresh Rate Switching (DRRS)
5712  *
5713  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5715  * dynamically, based on the usage scenario. This feature is applicable
5716  * for internal panels.
5717  *
5718  * Indication that the panel supports DRRS is given by the panel EDID, which
5719  * would list multiple refresh rates for one resolution.
5720  *
5721  * DRRS is of 2 types - static and seamless.
5722  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5723  * (may appear as a blink on screen) and is used in dock-undock scenario.
5724  * Seamless DRRS involves changing RR without any visual effect to the user
5725  * and can be used during normal system usage. This is done by programming
5726  * certain registers.
5727  *
5728  * Support for static/seamless DRRS may be indicated in the VBT based on
5729  * inputs from the panel spec.
5730  *
5731  * DRRS saves power by switching to low RR based on usage scenarios.
5732  *
5733  * The implementation is based on frontbuffer tracking implementation.  When
5734  * there is a disturbance on the screen triggered by user activity or a periodic
5735  * system activity, DRRS is disabled (RR is changed to high RR).  When there is
5736  * no movement on screen, after a timeout of 1 second, a switch to low RR is
5737  * made.
5738  *
5739  * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
5740  * and intel_edp_drrs_flush() are called.
5741  *
5742  * DRRS can be further extended to support other internal panels and also
5743  * the scenario of video playback wherein RR is set based on the rate
5744  * requested by userspace.
5745  */
5746
5747 /**
5748  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5749  * @intel_connector: eDP connector
5750  * @fixed_mode: preferred mode of panel
5751  *
 * This function is called only once at driver load to initialize basic
5753  * DRRS stuff.
5754  *
5755  * Returns:
5756  * Downclock mode if panel supports it, else return NULL.
5757  * DRRS support is determined by the presence of downclock mode (apart
5758  * from VBT setting).
5759  */
5760 static struct drm_display_mode *
5761 intel_dp_drrs_init(struct intel_connector *intel_connector,
5762                 struct drm_display_mode *fixed_mode)
5763 {
5764         struct drm_connector *connector = &intel_connector->base;
5765         struct drm_device *dev = connector->dev;
5766         struct drm_i915_private *dev_priv = to_i915(dev);
5767         struct drm_display_mode *downclock_mode = NULL;
5768
5769         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5770         mutex_init(&dev_priv->drrs.mutex);
5771
5772         if (INTEL_GEN(dev_priv) <= 6) {
5773                 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5774                 return NULL;
5775         }
5776
5777         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5778                 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5779                 return NULL;
5780         }
5781
5782         downclock_mode = intel_find_panel_downclock
5783                                         (dev_priv, fixed_mode, connector);
5784
5785         if (!downclock_mode) {
5786                 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5787                 return NULL;
5788         }
5789
5790         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5791
5792         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5793         DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5794         return downclock_mode;
5795 }
5796
/*
 * eDP-specific connector setup: initialize the panel power sequencer,
 * cache DPCD and EDID, pick the fixed (and optional downclock) panel
 * mode, and set up the backlight.  Returns false when the panel looks
 * absent (a "ghost") or cannot be registered; true otherwise (including
 * for non-eDP ports, which need none of this).
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                                     struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_display_mode *fixed_mode = NULL;
        struct drm_display_mode *downclock_mode = NULL;
        bool has_dpcd;
        struct drm_display_mode *scan;
        struct edid *edid;
        enum pipe pipe = INVALID_PIPE;

        if (!is_edp(intel_dp))
                return true;

        /*
         * On IBX/CPT we may get here with LVDS already registered. Since the
         * driver uses the only internal power sequencer available for both
         * eDP and LVDS bail out early in this case to prevent interfering
         * with an already powered-on LVDS power sequencer.
         */
        if (intel_get_lvds_encoder(dev)) {
                WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
                DRM_INFO("LVDS was detected, not registering eDP\n");

                return false;
        }

        /* Bring the panel power sequencer into a known, sane state. */
        pps_lock(intel_dp);

        intel_dp_init_panel_power_timestamps(intel_dp);
        intel_dp_pps_init(dev, intel_dp);
        intel_edp_panel_vdd_sanitize(intel_dp);

        pps_unlock(intel_dp);

        /* Cache DPCD and EDID for edp. */
        has_dpcd = intel_edp_init_dpcd(intel_dp);

        if (!has_dpcd) {
                /* if this fails, presume the device is a ghost */
                DRM_INFO("failed to retrieve link info, disabling eDP\n");
                goto out_vdd_off;
        }

        mutex_lock(&dev->mode_config.mutex);
        edid = drm_get_edid(connector, &intel_dp->aux.ddc);
        if (edid) {
                if (drm_add_edid_modes(connector, edid)) {
                        drm_mode_connector_update_edid_property(connector,
                                                                edid);
                        drm_edid_to_eld(connector, edid);
                } else {
                        /* EDID present but yielded no modes: treat as invalid. */
                        kfree(edid);
                        edid = ERR_PTR(-EINVAL);
                }
        } else {
                edid = ERR_PTR(-ENOENT);
        }
        intel_connector->edid = edid;

        /* prefer fixed mode from EDID if available */
        list_for_each_entry(scan, &connector->probed_modes, head) {
                if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
                        fixed_mode = drm_mode_duplicate(dev, scan);
                        /* DRRS needs a downclock variant of the fixed mode. */
                        downclock_mode = intel_dp_drrs_init(
                                                intel_connector, fixed_mode);
                        break;
                }
        }

        /* fallback to VBT if available for eDP */
        if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
                fixed_mode = drm_mode_duplicate(dev,
                                        dev_priv->vbt.lfp_lvds_vbt_mode);
                if (fixed_mode) {
                        fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
                        connector->display_info.width_mm = fixed_mode->width_mm;
                        connector->display_info.height_mm = fixed_mode->height_mm;
                }
        }
        mutex_unlock(&dev->mode_config.mutex);

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                intel_dp->edp_notifier.notifier_call = edp_notify_handler;
                register_reboot_notifier(&intel_dp->edp_notifier);

                /*
                 * Figure out the current pipe for the initial backlight setup.
                 * If the current pipe isn't valid, try the PPS pipe, and if that
                 * fails just assume pipe A.
                 */
                pipe = vlv_active_pipe(intel_dp);

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = intel_dp->pps_pipe;

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = PIPE_A;

                DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
                              pipe_name(pipe));
        }

        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_connector->panel.backlight.power = intel_edp_backlight_power;
        intel_panel_setup_backlight(connector, pipe);

        return true;

out_vdd_off:
        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
        /*
         * vdd might still be enabled due to the delayed vdd off.
         * Make sure vdd is actually turned off here.
         */
        pps_lock(intel_dp);
        edp_panel_vdd_off_sync(intel_dp);
        pps_unlock(intel_dp);

        return false;
}
5922
5923 /* Set up the hotplug pin and aux power domain. */
5924 static void
5925 intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port)
5926 {
5927         struct intel_encoder *encoder = &intel_dig_port->base;
5928         struct intel_dp *intel_dp = &intel_dig_port->dp;
5929
5930         switch (intel_dig_port->port) {
5931         case PORT_A:
5932                 encoder->hpd_pin = HPD_PORT_A;
5933                 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_A;
5934                 break;
5935         case PORT_B:
5936                 encoder->hpd_pin = HPD_PORT_B;
5937                 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_B;
5938                 break;
5939         case PORT_C:
5940                 encoder->hpd_pin = HPD_PORT_C;
5941                 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_C;
5942                 break;
5943         case PORT_D:
5944                 encoder->hpd_pin = HPD_PORT_D;
5945                 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
5946                 break;
5947         case PORT_E:
5948                 encoder->hpd_pin = HPD_PORT_E;
5949
5950                 /* FIXME: Check VBT for actual wiring of PORT E */
5951                 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
5952                 break;
5953         default:
5954                 MISSING_CASE(intel_dig_port->port);
5955         }
5956 }
5957
/*
 * Shared DP/eDP connector initialization: per-platform AUX vfuncs,
 * connector registration, hotplug/power-domain info, optional MST, and
 * (via intel_edp_init_connector()) eDP panel setup.  Returns false on
 * failure after cleaning up the partially-initialized connector.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                        struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
        int type;

        if (WARN(intel_dig_port->max_lanes < 1,
                 "Not enough lanes (%d) for DP on port %c\n",
                 intel_dig_port->max_lanes, port_name(port)))
                return false;

        intel_dp_set_source_rates(intel_dp);

        intel_dp->reset_link_params = true;
        intel_dp->pps_pipe = INVALID_PIPE;
        intel_dp->active_pipe = INVALID_PIPE;

        /* intel_dp vfuncs: pick per-platform AUX clock divider ... */
        if (INTEL_GEN(dev_priv) >= 9)
                intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
        else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
        else if (HAS_PCH_SPLIT(dev_priv))
                intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
        else
                intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

        /* ... and AUX send-control hooks. */
        if (INTEL_GEN(dev_priv) >= 9)
                intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
        else
                intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

        if (HAS_DDI(dev_priv))
                intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

        /* Preserve the current hw state. */
        intel_dp->DP = I915_READ(intel_dp->output_reg);
        intel_dp->attached_connector = intel_connector;

        if (intel_dp_is_edp(dev_priv, port))
                type = DRM_MODE_CONNECTOR_eDP;
        else
                type = DRM_MODE_CONNECTOR_DisplayPort;

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                intel_dp->active_pipe = vlv_active_pipe(intel_dp);

        /*
         * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
         * for DP the encoder type can be set by the caller to
         * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
         */
        if (type == DRM_MODE_CONNECTOR_eDP)
                intel_encoder->type = INTEL_OUTPUT_EDP;

        /* eDP only on port B and/or C on vlv/chv */
        if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
                    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
                return false;

        DRM_DEBUG_KMS("Adding %s connector on port %c\n",
                        type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
                        port_name(port));

        drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

        connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;

        intel_dp_init_connector_port_info(intel_dig_port);

        intel_dp_aux_init(intel_dp);

        INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
                          edp_panel_vdd_work);

        intel_connector_attach_encoder(intel_connector, intel_encoder);

        if (HAS_DDI(dev_priv))
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;

        /* init MST on ports that can support it */
        if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) &&
            (port == PORT_B || port == PORT_C || port == PORT_D))
                intel_dp_mst_encoder_init(intel_dig_port,
                                          intel_connector->base.base.id);

        /* eDP panel setup failed (e.g. ghost panel): unwind AUX and MST. */
        if (!intel_edp_init_connector(intel_dp, intel_connector)) {
                intel_dp_aux_fini(intel_dp);
                intel_dp_mst_encoder_cleanup(intel_dig_port);
                goto fail;
        }

        intel_dp_add_properties(intel_dp, connector);

        /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
         * 0xd.  Failure to do so will result in spurious interrupts being
         * generated on the port when a cable is not attached.
         */
        if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
                u32 temp = I915_READ(PEG_BAND_GAP_DATA);
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }

        return true;

fail:
        drm_connector_cleanup(connector);

        return false;
}
6078
/*
 * Allocate and register a DP (or eDP) encoder and connector for @port,
 * driven by the DP control register @output_reg.  Returns false and
 * frees all allocations on any failure.
 */
bool intel_dp_init(struct drm_i915_private *dev_priv,
                   i915_reg_t output_reg,
                   enum port port)
{
        struct intel_digital_port *intel_dig_port;
        struct intel_encoder *intel_encoder;
        struct drm_encoder *encoder;
        struct intel_connector *intel_connector;

        intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
        if (!intel_dig_port)
                return false;

        intel_connector = intel_connector_alloc();
        if (!intel_connector)
                goto err_connector_alloc;

        intel_encoder = &intel_dig_port->base;
        encoder = &intel_encoder->base;

        /*
         * NOTE(review): DRM_MODE_ENCODER_TMDS is used here even for DP —
         * presumably historical; confirm before changing.
         */
        if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
                             &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
                             "DP %c", port_name(port)))
                goto err_encoder_init;

        intel_encoder->compute_config = intel_dp_compute_config;
        intel_encoder->disable = intel_disable_dp;
        intel_encoder->get_hw_state = intel_dp_get_hw_state;
        intel_encoder->get_config = intel_dp_get_config;
        intel_encoder->suspend = intel_dp_encoder_suspend;
        /* Platform-specific enable/disable sequences. */
        if (IS_CHERRYVIEW(dev_priv)) {
                intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
                intel_encoder->pre_enable = chv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->post_disable = chv_post_disable_dp;
                intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
        } else if (IS_VALLEYVIEW(dev_priv)) {
                intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
                intel_encoder->pre_enable = vlv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->post_disable = vlv_post_disable_dp;
        } else {
                intel_encoder->pre_enable = g4x_pre_enable_dp;
                intel_encoder->enable = g4x_enable_dp;
                if (INTEL_GEN(dev_priv) >= 5)
                        intel_encoder->post_disable = ilk_post_disable_dp;
        }

        intel_dig_port->port = port;
        intel_dig_port->dp.output_reg = output_reg;
        intel_dig_port->max_lanes = 4;

        intel_encoder->type = INTEL_OUTPUT_DP;
        intel_encoder->power_domain = intel_port_to_power_domain(port);
        /* Which pipes can drive this port. */
        if (IS_CHERRYVIEW(dev_priv)) {
                if (port == PORT_D)
                        intel_encoder->crtc_mask = 1 << 2;
                else
                        intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
        } else {
                intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
        }
        intel_encoder->cloneable = 0;
        intel_encoder->port = port;

        intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
        dev_priv->hotplug.irq_port[port] = intel_dig_port;

        if (!intel_dp_init_connector(intel_dig_port, intel_connector))
                goto err_init_connector;

        return true;

        /* Error unwind: each label undoes the steps completed before it. */
err_init_connector:
        drm_encoder_cleanup(encoder);
err_encoder_init:
        kfree(intel_connector);
err_connector_alloc:
        kfree(intel_dig_port);
        return false;
}
6160
6161 void intel_dp_mst_suspend(struct drm_device *dev)
6162 {
6163         struct drm_i915_private *dev_priv = to_i915(dev);
6164         int i;
6165
6166         /* disable MST */
6167         for (i = 0; i < I915_MAX_PORTS; i++) {
6168                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6169
6170                 if (!intel_dig_port || !intel_dig_port->dp.can_mst)
6171                         continue;
6172
6173                 if (intel_dig_port->dp.is_mst)
6174                         drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6175         }
6176 }
6177
6178 void intel_dp_mst_resume(struct drm_device *dev)
6179 {
6180         struct drm_i915_private *dev_priv = to_i915(dev);
6181         int i;
6182
6183         for (i = 0; i < I915_MAX_PORTS; i++) {
6184                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6185                 int ret;
6186
6187                 if (!intel_dig_port || !intel_dig_port->dp.can_mst)
6188                         continue;
6189
6190                 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6191                 if (ret)
6192                         intel_dp_check_mst_status(&intel_dig_port->dp);
6193         }
6194 }