drm/i915/pps: rename vlv_init_panel_power_sequencer to vlv_pps_init
[sfrench/cifs-2.6.git] / drivers / gpu / drm / i915 / display / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/export.h>
29 #include <linux/i2c.h>
30 #include <linux/notifier.h>
31 #include <linux/slab.h>
32 #include <linux/types.h>
33
34 #include <asm/byteorder.h>
35
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_crtc.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_probe_helper.h>
41
42 #include "i915_debugfs.h"
43 #include "i915_drv.h"
44 #include "i915_trace.h"
45 #include "intel_atomic.h"
46 #include "intel_audio.h"
47 #include "intel_connector.h"
48 #include "intel_ddi.h"
49 #include "intel_display_types.h"
50 #include "intel_dp.h"
51 #include "intel_dp_link_training.h"
52 #include "intel_dp_mst.h"
53 #include "intel_dpio_phy.h"
54 #include "intel_fifo_underrun.h"
55 #include "intel_hdcp.h"
56 #include "intel_hdmi.h"
57 #include "intel_hotplug.h"
58 #include "intel_lspcon.h"
59 #include "intel_lvds.h"
60 #include "intel_panel.h"
61 #include "intel_pps.h"
62 #include "intel_psr.h"
63 #include "intel_sideband.h"
64 #include "intel_tc.h"
65 #include "intel_vdsc.h"
66
/* Number of DPRX Event Status Indicator bytes read over DPCD */
#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits  */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
82
/* Pairs a DP link clock (in kHz) with the DPLL divider values producing it. */
struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};
87
/* G4x DPLL settings for the 162 MHz and 270 MHz DP link (symbol) clocks. */
static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
94
/* PCH DPLL settings for the 162 MHz and 270 MHz DP link (symbol) clocks. */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
101
/* VLV DPLL settings for the 162 MHz and 270 MHz DP link (symbol) clocks. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
108
/*
 * CHV supports eDP 1.4, which has more link rates.
 * Only the fixed rates are listed below; variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
124
125 const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
126 {
127         return IS_CHERRYVIEW(i915) ? &chv_dpll[0].dpll : &vlv_dpll[0].dpll;
128 }
129
/* Constants for DP DSC configurations: BPP values known valid, per VESA. */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};
137
138 /**
139  * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
140  * @intel_dp: DP struct
141  *
142  * If a CPU or PCH DP output is attached to an eDP panel, this function
143  * will return true, and false otherwise.
144  */
145 bool intel_dp_is_edp(struct intel_dp *intel_dp)
146 {
147         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
148
149         return dig_port->base.type == INTEL_OUTPUT_EDP;
150 }
151
/* Forward declarations for helpers defined later in this file. */
static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
155
156 /* update sink rates from dpcd */
157 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
158 {
159         static const int dp_rates[] = {
160                 162000, 270000, 540000, 810000
161         };
162         int i, max_rate;
163         int max_lttpr_rate;
164
165         if (drm_dp_has_quirk(&intel_dp->desc, 0,
166                              DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
167                 /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
168                 static const int quirk_rates[] = { 162000, 270000, 324000 };
169
170                 memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
171                 intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);
172
173                 return;
174         }
175
176         max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
177         max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
178         if (max_lttpr_rate)
179                 max_rate = min(max_rate, max_lttpr_rate);
180
181         for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
182                 if (dp_rates[i] > max_rate)
183                         break;
184                 intel_dp->sink_rates[i] = dp_rates[i];
185         }
186
187         intel_dp->num_sink_rates = i;
188 }
189
/*
 * Get length of rates array potentially limited by max_rate.
 *
 * Returns the count of leading entries usable at max_rate; rates[] is
 * expected in ascending order (see the source/sink rate tables).
 */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Scan downward from the top rate for the first one that fits. */
	for (i = len; i > 0; i--) {
		if (rates[i - 1] <= max_rate)
			return i;
	}

	return 0;
}
203
204 /* Get length of common rates array potentially limited by max_rate. */
205 static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
206                                           int max_rate)
207 {
208         return intel_dp_rate_limit_len(intel_dp->common_rates,
209                                        intel_dp->num_common_rates, max_rate);
210 }
211
212 /* Theoretical max between source and sink */
213 static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
214 {
215         return intel_dp->common_rates[intel_dp->num_common_rates - 1];
216 }
217
218 /* Theoretical max between source and sink */
219 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
220 {
221         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
222         int source_max = dig_port->max_lanes;
223         int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
224         int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
225         int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);
226
227         if (lttpr_max)
228                 sink_max = min(sink_max, lttpr_max);
229
230         return min3(source_max, sink_max, fia_max);
231 }
232
/* Current max lane count for this link (may be reduced by training fallback). */
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}
237
/*
 * Data rate (kBytes/s) required to drive a mode of the given pixel clock
 * (kHz) at the given bits per pixel, rounded up.
 */
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
244
/*
 * Max data rate of a link, in kBytes/s.
 *
 * max_link_clock is the link symbol clock (LS_Clk) in kHz, not the link
 * rate usually quoted in Gbps. Eight data bits are carried per LS_Clk per
 * lane, so the channel-encoding overhead handled in the PHY needs no
 * accounting here.
 */
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return max_lanes * max_link_clock;
}
256
257 bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
258 {
259         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
260         struct intel_encoder *encoder = &intel_dig_port->base;
261         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
262
263         return INTEL_GEN(dev_priv) >= 12 ||
264                 (INTEL_GEN(dev_priv) == 11 &&
265                  encoder->port != PORT_A);
266 }
267
/*
 * Max source link rate for CNL, in kHz.
 *
 * Depends on the port's voltage SKU (read from CNL_PORT_COMP_DW3) and on
 * which port is being driven; checks are in priority order.
 */
static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}
290
291 static int icl_max_source_rate(struct intel_dp *intel_dp)
292 {
293         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
294         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
295         enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
296
297         if (intel_phy_is_combo(dev_priv, phy) &&
298             !intel_dp_is_edp(intel_dp))
299                 return 540000;
300
301         return 810000;
302 }
303
/* Max source link rate for EHL/JSL, in kHz: eDP tops out at 5.4G. */
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
	return intel_dp_is_edp(intel_dp) ? 540000 : 810000;
}
311
/*
 * Build the table of link rates this source (platform + port) can drive.
 *
 * Selects the per-platform rate table, clamps it by the platform-specific
 * max rate and the VBT-provided max rate, and stores the result in
 * intel_dp->source_rates / num_source_rates. Must run exactly once per
 * intel_dp (enforced by the WARN below).
 */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	/* Platform checks are ordered newest first. */
	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else if (IS_JSL_EHL(dev_priv))
			max_rate = ehl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	/* Honour the tighter of the hardware and VBT limits, if any. */
	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}
377
378 static int intersect_rates(const int *source_rates, int source_len,
379                            const int *sink_rates, int sink_len,
380                            int *common_rates)
381 {
382         int i = 0, j = 0, k = 0;
383
384         while (i < source_len && j < sink_len) {
385                 if (source_rates[i] == sink_rates[j]) {
386                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
387                                 return k;
388                         common_rates[k] = source_rates[i];
389                         ++k;
390                         ++i;
391                         ++j;
392                 } else if (source_rates[i] < sink_rates[j]) {
393                         ++i;
394                 } else {
395                         ++j;
396                 }
397         }
398         return k;
399 }
400
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int idx = 0;

	while (idx < len) {
		if (rates[idx] == rate)
			return idx;
		idx++;
	}

	return -1;
}
412
413 static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
414 {
415         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
416
417         drm_WARN_ON(&i915->drm,
418                     !intel_dp->num_source_rates || !intel_dp->num_sink_rates);
419
420         intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
421                                                      intel_dp->num_source_rates,
422                                                      intel_dp->sink_rates,
423                                                      intel_dp->num_sink_rates,
424                                                      intel_dp->common_rates);
425
426         /* Paranoia, there should always be something in common. */
427         if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
428                 intel_dp->common_rates[0] = 162000;
429                 intel_dp->num_common_rates = 1;
430         }
431 }
432
433 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
434                                        u8 lane_count)
435 {
436         /*
437          * FIXME: we need to synchronize the current link parameters with
438          * hardware readout. Currently fast link training doesn't work on
439          * boot-up.
440          */
441         if (link_rate == 0 ||
442             link_rate > intel_dp->max_link_rate)
443                 return false;
444
445         if (lane_count == 0 ||
446             lane_count > intel_dp_max_lane_count(intel_dp))
447                 return false;
448
449         return true;
450 }
451
452 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
453                                                      int link_rate,
454                                                      u8 lane_count)
455 {
456         const struct drm_display_mode *fixed_mode =
457                 intel_dp->attached_connector->panel.fixed_mode;
458         int mode_rate, max_rate;
459
460         mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
461         max_rate = intel_dp_max_data_rate(link_rate, lane_count);
462         if (mode_rate > max_rate)
463                 return false;
464
465         return true;
466 }
467
/*
 * Compute fallback link parameters after a failed link training.
 *
 * Strategy, in order: MST links get no fallback; eDP first retries once
 * with the max parameters; otherwise step down to the next lower common
 * link rate, and when already at the lowest rate, halve the lane count
 * instead. For eDP a reduced config is only accepted if it can still
 * carry the panel's fixed mode. Returns 0 when training should be
 * retried, -1 when no further fallback is possible.
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
		drm_dbg_kms(&i915->drm,
			    "Retrying Link training for eDP with max parameters\n");
		intel_dp->use_max_params = true;
		return 0;
	}

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		/* Lower the rate one step, keeping the lane count. */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* Already at the lowest rate: halve the lanes, restore max rate. */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}
522
/*
 * Scale a mode clock by the DSC FEC overhead: multiplies by
 * 1000000 / 972261, i.e. 1/0.972261 (see DP_DSC_FEC_OVERHEAD_FACTOR).
 */
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}
528
/* Small joiner RAM size in bits: 7680 bytes on gen11+, 6144 before. */
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	return (INTEL_GEN(i915) >= 11 ? 7680 : 6144) * 8;
}
537
538 static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
539                                        u32 link_clock, u32 lane_count,
540                                        u32 mode_clock, u32 mode_hdisplay,
541                                        bool bigjoiner)
542 {
543         u32 bits_per_pixel, max_bpp_small_joiner_ram;
544         int i;
545
546         /*
547          * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
548          * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
549          * for SST -> TimeSlotsPerMTP is 1,
550          * for MST -> TimeSlotsPerMTP has to be calculated
551          */
552         bits_per_pixel = (link_clock * lane_count * 8) /
553                          intel_dp_mode_to_fec_clock(mode_clock);
554         drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);
555
556         /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
557         max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
558                 mode_hdisplay;
559
560         if (bigjoiner)
561                 max_bpp_small_joiner_ram *= 2;
562
563         drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
564                     max_bpp_small_joiner_ram);
565
566         /*
567          * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
568          * check, output bpp from small joiner RAM check)
569          */
570         bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
571
572         if (bigjoiner) {
573                 u32 max_bpp_bigjoiner =
574                         i915->max_cdclk_freq * 48 /
575                         intel_dp_mode_to_fec_clock(mode_clock);
576
577                 DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner);
578                 bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
579         }
580
581         /* Error out if the max bpp is less than smallest allowed valid bpp */
582         if (bits_per_pixel < valid_dsc_bpp[0]) {
583                 drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
584                             bits_per_pixel, valid_dsc_bpp[0]);
585                 return 0;
586         }
587
588         /* Find the nearest match in the array of known BPPs from VESA */
589         for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
590                 if (bits_per_pixel < valid_dsc_bpp[i + 1])
591                         break;
592         }
593         bits_per_pixel = valid_dsc_bpp[i];
594
595         /*
596          * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
597          * fractional part is 0
598          */
599         return bits_per_pixel << 4;
600 }
601
/*
 * Pick the smallest valid DSC slice count that satisfies both the sink's
 * peak throughput limit and its max slice width, as reported in the DSC
 * DPCD. Returns 0 if no supported slice count works.
 */
static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay,
				       bool bigjoiner)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	/* Minimum slices so each slice stays under the encoder throughput. */
	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = max_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		/* Bigjoiner doubles the effective slice count per pipe pair. */
		u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;

		if (test_slice_count >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false))
			break;

		/* big joiner needs small joiner to be enabled */
		if (bigjoiner && test_slice_count < 4)
			continue;

		if (min_slice_count <= test_slice_count)
			return test_slice_count;
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}
649
650 static enum intel_output_format
651 intel_dp_output_format(struct drm_connector *connector,
652                        const struct drm_display_mode *mode)
653 {
654         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
655         const struct drm_display_info *info = &connector->display_info;
656
657         if (!connector->ycbcr_420_allowed ||
658             !drm_mode_is_420_only(info, mode))
659                 return INTEL_OUTPUT_FORMAT_RGB;
660
661         if (intel_dp->dfp.rgb_to_ycbcr &&
662             intel_dp->dfp.ycbcr_444_to_420)
663                 return INTEL_OUTPUT_FORMAT_RGB;
664
665         if (intel_dp->dfp.ycbcr_444_to_420)
666                 return INTEL_OUTPUT_FORMAT_YCBCR444;
667         else
668                 return INTEL_OUTPUT_FORMAT_YCBCR420;
669 }
670
671 int intel_dp_min_bpp(enum intel_output_format output_format)
672 {
673         if (output_format == INTEL_OUTPUT_FORMAT_RGB)
674                 return 6 * 3;
675         else
676                 return 8 * 3;
677 }
678
679 static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
680 {
681         /*
682          * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
683          * format of the number of bytes per pixel will be half the number
684          * of bytes of RGB pixel.
685          */
686         if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
687                 bpp /= 2;
688
689         return bpp;
690 }
691
692 static int
693 intel_dp_mode_min_output_bpp(struct drm_connector *connector,
694                              const struct drm_display_mode *mode)
695 {
696         enum intel_output_format output_format =
697                 intel_dp_output_format(connector, mode);
698
699         return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
700 }
701
702 static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
703                                   int hdisplay)
704 {
705         /*
706          * Older platforms don't like hdisplay==4096 with DP.
707          *
708          * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
709          * and frame counter increment), but we don't get vblank interrupts,
710          * and the pipe underruns immediately. The link also doesn't seem
711          * to get trained properly.
712          *
713          * On CHV the vblank interrupts don't seem to disappear but
714          * otherwise the symptoms are similar.
715          *
716          * TODO: confirm the behaviour on HSW+
717          */
718         return hdisplay == 4096 && !HAS_DDI(dev_priv);
719 }
720
/*
 * Validate a mode against downstream facing port (DFP) limits: the PCON
 * FRL bandwidth when one is advertised, otherwise the DFP dotclock limit
 * and the DP++/HDMI/DVI TMDS clock range.
 */
static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
			       const struct drm_display_mode *mode,
			       int target_clock)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	const struct drm_display_info *info = &connector->base.display_info;
	int tmds_clock;

	/* If PCON supports FRL MODE, check FRL bandwidth constraints */
	if (intel_dp->dfp.pcon_max_frl_bw) {
		int target_bw;
		int max_frl_bw;
		int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);

		target_bw = bpp * target_clock;

		max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;

		/* converting bw from Gbps to Kbps*/
		max_frl_bw = max_frl_bw * 1000000;

		if (target_bw > max_frl_bw)
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}

	if (intel_dp->dfp.max_dotclock &&
	    target_clock > intel_dp->dfp.max_dotclock)
		return MODE_CLOCK_HIGH;

	/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
	tmds_clock = target_clock;
	if (drm_mode_is_420_only(info, mode))
		tmds_clock /= 2;

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return MODE_CLOCK_LOW;
	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}
767
/*
 * drm_connector .mode_valid() hook: reject modes this DP source/sink
 * pair cannot drive, considering the eDP fixed panel mode, the max
 * dotclock, link bandwidth, and DSC/bigjoiner assistance.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = dev_priv->max_dotclk_freq;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;
	enum drm_mode_status status;
	bool dsc = false, bigjoiner = false;

	/* Doublescan and double-clocked modes are not supported over DP. */
	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	/*
	 * eDP panels: refuse anything larger than the fixed panel mode,
	 * and do the bandwidth math with the panel's own pixel clock.
	 */
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	/* Reject pixel clocks below 10 MHz (mode->clock is in kHz). */
	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	/*
	 * Modes too fast or too wide for a single pipe may still be
	 * driven by two joined pipes (bigjoiner), doubling the
	 * effective dotclock limit.
	 */
	if ((target_clock > max_dotclk || mode->hdisplay > 5120) &&
	    intel_dp_can_bigjoiner(intel_dp)) {
		bigjoiner = true;
		max_dotclk *= 2;
	}
	if (target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock,
					   intel_dp_mode_min_output_bpp(connector, mode));

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay,
							    bigjoiner) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay,
							     bigjoiner);
		}

		/* DSC is only usable if both a bpp and a slice count exist. */
		dsc = dsc_max_output_bpp && dsc_slice_count;
	}

	/* big joiner configuration needs DSC */
	if (bigjoiner && !dsc)
		return MODE_CLOCK_HIGH;

	/* Without DSC the mode must fit in the raw link bandwidth. */
	if (mode_rate > max_rate && !dsc)
		return MODE_CLOCK_HIGH;

	/* Let any downstream facing port (DFP) limits veto the mode. */
	status = intel_dp_mode_valid_downstream(intel_connector,
						mode, target_clock);
	if (status != MODE_OK)
		return status;

	return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}
865
/*
 * Pack up to 4 bytes of @src into a single 32-bit word, MSB first,
 * as expected by the AUX channel data registers.
 */
u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	u32 value = 0;
	int count = src_bytes;
	int shift = 24;

	/* Each AUX data register holds at most 4 bytes. */
	if (count > 4)
		count = 4;

	while (count-- > 0) {
		value |= (u32)*src++ << shift;
		shift -= 8;
	}

	return value;
}
877
/*
 * Unpack a 32-bit AUX data register word into up to 4 bytes of @dst,
 * MSB first — the inverse of intel_dp_pack_aux().
 */
static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int count = dst_bytes;
	int shift = 24;

	/* Each AUX data register holds at most 4 bytes. */
	if (count > 4)
		count = 4;

	while (count-- > 0) {
		*dst++ = src >> shift;
		shift -= 8;
	}
}
886
/*
 * Wait (sleeping on the gmbus wait queue) for the in-flight AUX
 * transfer to finish, i.e. for SEND_BUSY to clear in the channel
 * control register. Returns the final control register value and
 * logs an error if the 10ms timeout expires.
 */
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

	/* Completion condition; also captures the register into status. */
#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}
911
912 static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
913 {
914         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
915
916         if (index)
917                 return 0;
918
919         /*
920          * The clock divider is based off the hrawclk, and would like to run at
921          * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
922          */
923         return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
924 }
925
926 static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
927 {
928         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
929         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
930         u32 freq;
931
932         if (index)
933                 return 0;
934
935         /*
936          * The clock divider is based off the cdclk or PCH rawclk, and would
937          * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
938          * divide by 2000 and use that
939          */
940         if (dig_port->aux_ch == AUX_CH_A)
941                 freq = dev_priv->cdclk.hw.cdclk;
942         else
943                 freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
944         return DIV_ROUND_CLOSEST(freq, 2000);
945 }
946
947 static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
948 {
949         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
950         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
951
952         if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
953                 /* Workaround for non-ULT HSW */
954                 switch (index) {
955                 case 0: return 63;
956                 case 1: return 72;
957                 default: return 0;
958                 }
959         }
960
961         return ilk_get_aux_clock_divider(intel_dp, index);
962 }
963
964 static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
965 {
966         /*
967          * SKL doesn't need us to program the AUX clock divider (Hardware will
968          * derive the clock from CDCLK automatically). We still implement the
969          * get_aux_clock_divider vfunc to plug-in into the existing code.
970          */
971         return index ? 0 : 1;
972 }
973
974 static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
975                                 int send_bytes,
976                                 u32 aux_clock_divider)
977 {
978         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
979         struct drm_i915_private *dev_priv =
980                         to_i915(dig_port->base.base.dev);
981         u32 precharge, timeout;
982
983         if (IS_GEN(dev_priv, 6))
984                 precharge = 3;
985         else
986                 precharge = 5;
987
988         if (IS_BROADWELL(dev_priv))
989                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
990         else
991                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
992
993         return DP_AUX_CH_CTL_SEND_BUSY |
994                DP_AUX_CH_CTL_DONE |
995                DP_AUX_CH_CTL_INTERRUPT |
996                DP_AUX_CH_CTL_TIME_OUT_ERROR |
997                timeout |
998                DP_AUX_CH_CTL_RECEIVE_ERROR |
999                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1000                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1001                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
1002 }
1003
1004 static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1005                                 int send_bytes,
1006                                 u32 unused)
1007 {
1008         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1009         struct drm_i915_private *i915 =
1010                         to_i915(dig_port->base.base.dev);
1011         enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
1012         u32 ret;
1013
1014         ret = DP_AUX_CH_CTL_SEND_BUSY |
1015               DP_AUX_CH_CTL_DONE |
1016               DP_AUX_CH_CTL_INTERRUPT |
1017               DP_AUX_CH_CTL_TIME_OUT_ERROR |
1018               DP_AUX_CH_CTL_TIME_OUT_MAX |
1019               DP_AUX_CH_CTL_RECEIVE_ERROR |
1020               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1021               DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
1022               DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
1023
1024         if (intel_phy_is_tc(i915, phy) &&
1025             dig_port->tc_mode == TC_PORT_TBT_ALT)
1026                 ret |= DP_AUX_CH_CTL_TBT_IO;
1027
1028         return ret;
1029 }
1030
/*
 * Perform one raw AUX channel transfer: write @send_bytes from @send,
 * receive up to @recv_size bytes into @recv. Returns the number of
 * bytes received, or a negative errno (-EBUSY, -E2BIG, -EIO,
 * -ETIMEDOUT). Holds the AUX power domain, the PPS lock and — for
 * Type-C phys — the TC port lock for the duration of the transfer.
 */
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	/* Resolve the platform-specific control and data registers. */
	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(dig_port);

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = intel_pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = intel_pps_vdd_on_unlocked(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);

	intel_pps_check_power_unlocked(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		/* Still busy; warn once per distinct stuck status value. */
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry the transfer at each clock divider the platform provides. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Release everything in reverse acquisition order. */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		intel_pps_vdd_off_unlocked(intel_dp, false);

	intel_pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}
1223
1224 #define BARE_ADDRESS_SIZE       3
1225 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
1226
/* Fill in the 4-byte native AUX request header for @msg. */
static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	/* Byte 0: request type in the high nibble, address bits 19:16 low. */
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	/* Bytes 1-2: address bits 15:0, MSB first. */
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	/* Byte 3: transfer length, encoded as size - 1. */
	txbuf[3] = msg->size - 1;
}
1236
1237 static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
1238 {
1239         /*
1240          * If we're trying to send the HDCP Aksv, we need to set a the Aksv
1241          * select bit to inform the hardware to send the Aksv after our header
1242          * since we can't access that data from software.
1243          */
1244         if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
1245             msg->address == DP_AUX_HDCP_AKSV)
1246                 return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
1247
1248         return 0;
1249 }
1250
/*
 * drm_dp_aux.transfer hook: translate a drm_dp_aux_msg into a raw AUX
 * transfer via intel_dp_aux_xfer(). Returns the payload size on
 * success or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* A zero-size message sends a bare address (no length byte). */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(&i915->drm, txsize > 20))
			return -E2BIG;

		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			/* First reply byte: reply code in the high nibble. */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(&i915->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1323
1324
/* AUX_CH control register lookup for g4x: only channels B-D exist. */
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	/* NOTE(review): dev_priv looks unused but may be referenced
	 * implicitly by the register macros below — confirm before removing. */
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		/* Unexpected channel: warn and fall back to channel B. */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}
1341
/* AUX_CH data register @index lookup for g4x: only channels B-D exist. */
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	/* NOTE(review): dev_priv looks unused but may be referenced
	 * implicitly by the register macros below — confirm before removing. */
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		/* Unexpected channel: warn and fall back to channel B. */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}
1358
/*
 * AUX_CH control register lookup for ILK-class: channel A is in the
 * CPU/north display, channels B-D live behind the PCH.
 */
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	/* NOTE(review): dev_priv looks unused but may be referenced
	 * implicitly by the register macros below — confirm before removing. */
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		/* Unexpected channel: warn and fall back to channel A. */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
1377
/*
 * AUX_CH data register @index lookup for ILK-class: channel A is in
 * the CPU/north display, channels B-D live behind the PCH.
 */
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	/* NOTE(review): dev_priv looks unused but may be referenced
	 * implicitly by the register macros below — confirm before removing. */
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		/* Unexpected channel: warn and fall back to channel A. */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
1396
/* AUX_CH control register lookup for SKL+: channels A-F. */
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	/* NOTE(review): dev_priv looks unused but may be referenced
	 * implicitly by the register macros below — confirm before removing. */
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		/* Unexpected channel: warn and fall back to channel A. */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
1416
/* AUX_CH data register @index lookup for SKL+: channels A-F. */
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	/* NOTE(review): dev_priv looks unused but may be referenced
	 * implicitly by the register macros below — confirm before removing. */
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		/* Unexpected channel: warn and fall back to channel A. */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
1436
/* AUX_CH control register lookup for TGL+: channels A-C and USBC1-6. */
static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	/* NOTE(review): dev_priv looks unused but may be referenced
	 * implicitly by the register macros below — confirm before removing. */
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:
	case AUX_CH_USBC6:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		/* Unexpected channel: warn and fall back to channel A. */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
1459
/* AUX_CH data register @index lookup for TGL+: channels A-C and USBC1-6. */
static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	/* NOTE(review): dev_priv looks unused but may be referenced
	 * implicitly by the register macros below — confirm before removing. */
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:
	case AUX_CH_USBC6:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		/* Unexpected channel: warn and fall back to channel A. */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
1482
1483 static void
1484 intel_dp_aux_fini(struct intel_dp *intel_dp)
1485 {
1486         if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
1487                 cpu_latency_qos_remove_request(&intel_dp->pm_qos);
1488
1489         kfree(intel_dp->aux.name);
1490 }
1491
/*
 * Wire up the per-platform AUX channel vfuncs (register lookup, clock
 * divider, send-ctl builder), pick a human-readable channel name, and
 * register the drm_dp_aux transfer hook plus the CPU latency QoS
 * request used during transfers.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum aux_ch aux_ch = dig_port->aux_ch;

	/* Register lookup: newest platform check first. */
	if (INTEL_GEN(dev_priv) >= 12) {
		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
	} else if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	/* Clock divider scheme. */
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	/* Send-ctl register layout. */
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
					       aux_ch - AUX_CH_USBC1 + '1',
					       encoder->base.name);
	else
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
					       aux_ch_name(aux_ch),
					       encoder->base.name);

	intel_dp->aux.transfer = intel_dp_aux_transfer;
	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}
1543
1544 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1545 {
1546         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1547
1548         return max_rate >= 540000;
1549 }
1550
1551 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
1552 {
1553         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1554
1555         return max_rate >= 810000;
1556 }
1557
1558 static void
1559 intel_dp_set_clock(struct intel_encoder *encoder,
1560                    struct intel_crtc_state *pipe_config)
1561 {
1562         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1563         const struct dp_link_dpll *divisor = NULL;
1564         int i, count = 0;
1565
1566         if (IS_G4X(dev_priv)) {
1567                 divisor = g4x_dpll;
1568                 count = ARRAY_SIZE(g4x_dpll);
1569         } else if (HAS_PCH_SPLIT(dev_priv)) {
1570                 divisor = pch_dpll;
1571                 count = ARRAY_SIZE(pch_dpll);
1572         } else if (IS_CHERRYVIEW(dev_priv)) {
1573                 divisor = chv_dpll;
1574                 count = ARRAY_SIZE(chv_dpll);
1575         } else if (IS_VALLEYVIEW(dev_priv)) {
1576                 divisor = vlv_dpll;
1577                 count = ARRAY_SIZE(vlv_dpll);
1578         }
1579
1580         if (divisor && count) {
1581                 for (i = 0; i < count; i++) {
1582                         if (pipe_config->port_clock == divisor[i].clock) {
1583                                 pipe_config->dpll = divisor[i].dpll;
1584                                 pipe_config->clock_set = true;
1585                                 break;
1586                         }
1587                 }
1588         }
1589 }
1590
/*
 * Format @nelem integers from @array into @str as "a, b, c", writing
 * at most @len bytes (including the NUL). Output is silently
 * truncated at the last element that fits completely.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/*
		 * Stop on output error (r < 0) or truncation: snprintf
		 * returns the length it *would* have written, so
		 * r >= len means the element didn't fit. The explicit
		 * r < 0 check avoids relying on the negative value
		 * wrapping around in the signed/unsigned comparison.
		 */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1606
/*
 * Dump the source, sink and common (source intersect sink) link rate
 * tables to the KMS debug log.  No-op unless DRM_UT_KMS debugging is
 * enabled, so the string formatting cost is only paid when debugging.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	char str[128]; /* FIXME: too big for stack? */

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	/* str is reused for each of the three tables below. */
	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}
1627
1628 int
1629 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1630 {
1631         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1632         int len;
1633
1634         len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1635         if (drm_WARN_ON(&i915->drm, len <= 0))
1636                 return 162000;
1637
1638         return intel_dp->common_rates[len - 1];
1639 }
1640
1641 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1642 {
1643         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1644         int i = intel_dp_rate_index(intel_dp->sink_rates,
1645                                     intel_dp->num_sink_rates, rate);
1646
1647         if (drm_WARN_ON(&i915->drm, i < 0))
1648                 i = 0;
1649
1650         return i;
1651 }
1652
1653 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1654                            u8 *link_bw, u8 *rate_select)
1655 {
1656         /* eDP 1.4 rate select method. */
1657         if (intel_dp->use_rate_select) {
1658                 *link_bw = 0;
1659                 *rate_select =
1660                         intel_dp_rate_select(intel_dp, port_clock);
1661         } else {
1662                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1663                 *rate_select = 0;
1664         }
1665 }
1666
1667 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
1668                                          const struct intel_crtc_state *pipe_config)
1669 {
1670         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1671
1672         /* On TGL, FEC is supported on all Pipes */
1673         if (INTEL_GEN(dev_priv) >= 12)
1674                 return true;
1675
1676         if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
1677                 return true;
1678
1679         return false;
1680 }
1681
1682 static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1683                                   const struct intel_crtc_state *pipe_config)
1684 {
1685         return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1686                 drm_dp_sink_supports_fec(intel_dp->fec_capable);
1687 }
1688
1689 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
1690                                   const struct intel_crtc_state *crtc_state)
1691 {
1692         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
1693                 return false;
1694
1695         return intel_dsc_source_support(crtc_state) &&
1696                 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
1697 }
1698
1699 static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
1700                                    const struct intel_crtc_state *crtc_state)
1701 {
1702         return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
1703                 (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
1704                  intel_dp->dfp.ycbcr_444_to_420);
1705 }
1706
1707 static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
1708                                     const struct intel_crtc_state *crtc_state, int bpc)
1709 {
1710         int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;
1711
1712         if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
1713                 clock /= 2;
1714
1715         return clock;
1716 }
1717
1718 static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
1719                                            const struct intel_crtc_state *crtc_state, int bpc)
1720 {
1721         int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);
1722
1723         if (intel_dp->dfp.min_tmds_clock &&
1724             tmds_clock < intel_dp->dfp.min_tmds_clock)
1725                 return false;
1726
1727         if (intel_dp->dfp.max_tmds_clock &&
1728             tmds_clock > intel_dp->dfp.max_tmds_clock)
1729                 return false;
1730
1731         return true;
1732 }
1733
1734 static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
1735                                               const struct intel_crtc_state *crtc_state,
1736                                               int bpc)
1737 {
1738
1739         return intel_hdmi_deep_color_possible(crtc_state, bpc,
1740                                               intel_dp->has_hdmi_sink,
1741                                               intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
1742                 intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
1743 }
1744
1745 static int intel_dp_max_bpp(struct intel_dp *intel_dp,
1746                             const struct intel_crtc_state *crtc_state)
1747 {
1748         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1749         struct intel_connector *intel_connector = intel_dp->attached_connector;
1750         int bpp, bpc;
1751
1752         bpc = crtc_state->pipe_bpp / 3;
1753
1754         if (intel_dp->dfp.max_bpc)
1755                 bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);
1756
1757         if (intel_dp->dfp.min_tmds_clock) {
1758                 for (; bpc >= 10; bpc -= 2) {
1759                         if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
1760                                 break;
1761                 }
1762         }
1763
1764         bpp = bpc * 3;
1765         if (intel_dp_is_edp(intel_dp)) {
1766                 /* Get bpp from vbt only for panels that dont have bpp in edid */
1767                 if (intel_connector->base.display_info.bpc == 0 &&
1768                     dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
1769                         drm_dbg_kms(&dev_priv->drm,
1770                                     "clamping bpp for eDP panel to BIOS-provided %i\n",
1771                                     dev_priv->vbt.edp.bpp);
1772                         bpp = dev_priv->vbt.edp.bpp;
1773                 }
1774         }
1775
1776         return bpp;
1777 }
1778
/* Adjust link config limits based on compliance test requests. */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		/* Pin bpp to exactly the requested value. */
		limits->min_bpp = limits->max_bpp = bpp;
		/* Force dithering off for the 6 bpc (18 bpp) test case. */
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			/* Pin both link rate and lane count to the request. */
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}
1816
1817 /* Optimize link config in order: max bpp, min clock, min lanes */
1818 static int
1819 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
1820                                   struct intel_crtc_state *pipe_config,
1821                                   const struct link_config_limits *limits)
1822 {
1823         struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
1824         int bpp, clock, lane_count;
1825         int mode_rate, link_clock, link_avail;
1826
1827         for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1828                 int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
1829
1830                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1831                                                    output_bpp);
1832
1833                 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1834                         for (lane_count = limits->min_lane_count;
1835                              lane_count <= limits->max_lane_count;
1836                              lane_count <<= 1) {
1837                                 link_clock = intel_dp->common_rates[clock];
1838                                 link_avail = intel_dp_max_data_rate(link_clock,
1839                                                                     lane_count);
1840
1841                                 if (mode_rate <= link_avail) {
1842                                         pipe_config->lane_count = lane_count;
1843                                         pipe_config->pipe_bpp = bpp;
1844                                         pipe_config->port_clock = link_clock;
1845
1846                                         return 0;
1847                                 }
1848                         }
1849                 }
1850         }
1851
1852         return -EINVAL;
1853 }
1854
1855 /* Optimize link config in order: max bpp, min lanes, min clock */
1856 static int
1857 intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
1858                                   struct intel_crtc_state *pipe_config,
1859                                   const struct link_config_limits *limits)
1860 {
1861         const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
1862         int bpp, clock, lane_count;
1863         int mode_rate, link_clock, link_avail;
1864
1865         for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1866                 int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
1867
1868                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1869                                                    output_bpp);
1870
1871                 for (lane_count = limits->min_lane_count;
1872                      lane_count <= limits->max_lane_count;
1873                      lane_count <<= 1) {
1874                         for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1875                                 link_clock = intel_dp->common_rates[clock];
1876                                 link_avail = intel_dp_max_data_rate(link_clock,
1877                                                                     lane_count);
1878
1879                                 if (mode_rate <= link_avail) {
1880                                         pipe_config->lane_count = lane_count;
1881                                         pipe_config->pipe_bpp = bpp;
1882                                         pipe_config->port_clock = link_clock;
1883
1884                                         return 0;
1885                                 }
1886                         }
1887                 }
1888         }
1889
1890         return -EINVAL;
1891 }
1892
1893 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
1894 {
1895         int i, num_bpc;
1896         u8 dsc_bpc[3] = {0};
1897
1898         num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
1899                                                        dsc_bpc);
1900         for (i = 0; i < num_bpc; i++) {
1901                 if (dsc_max_bpc >= dsc_bpc[i])
1902                         return dsc_bpc[i] * 3;
1903         }
1904
1905         return 0;
1906 }
1907
1908 #define DSC_SUPPORTED_VERSION_MIN               1
1909
/*
 * Fill out the DSC configuration (crtc_state->dsc.config) for a DP sink:
 * combine the source's computed parameters with the sink's DSC DPCD
 * capabilities, then derive the rate control parameters.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	u8 line_buf_depth;
	int ret;

	/*
	 * RC_MODEL_SIZE is currently a constant across all configurations.
	 *
	 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
	 * DP_DSC_RC_BUF_SIZE for this.
	 */
	vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;

	/* Source-side parameters first; bail out if they can't be computed. */
	ret = intel_dsc_compute_params(encoder, crtc_state);
	if (ret)
		return ret;

	/*
	 * Slice Height of 8 works for all currently available panels. So start
	 * with that if pic_height is an integral multiple of 8. Eventually add
	 * logic to try multiple slice heights.
	 */
	if (vdsc_cfg->pic_height % 8 == 0)
		vdsc_cfg->slice_height = 8;
	else if (vdsc_cfg->pic_height % 4 == 0)
		vdsc_cfg->slice_height = 4;
	else
		vdsc_cfg->slice_height = 2;

	/*
	 * DSC version: major straight from the sink's DPCD; minor is the
	 * lower of the sink's advertised minor and our supported minimum.
	 */
	vdsc_cfg->dsc_version_major =
		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	vdsc_cfg->dsc_version_minor =
		min(DSC_SUPPORTED_VERSION_MIN,
		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);

	/* Whether the sink decoder accepts RGB input. */
	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
		DP_DSC_RGB;

	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
	if (!line_buf_depth) {
		drm_dbg_kms(&i915->drm,
			    "DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	/* Clamp line buffer depth to what the chosen DSC minor version allows. */
	if (vdsc_cfg->dsc_version_minor == 2)
		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
	else
		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;

	/* Enable block prediction only if the sink decoder supports it. */
	vdsc_cfg->block_pred_enable =
		intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}
1974
/*
 * Compute a DSC-enabled link configuration: pick the pipe bpp, compressed
 * bpp and slice count, decide whether the VDSC engine must be split across
 * two instances, and fill out the DSC parameters.
 *
 * Called when the mode doesn't fit the available bandwidth uncompressed,
 * when DSC is forced via debugfs, or when big joiner is needed.
 * Returns 0 on success or a negative error code if DSC can't be used.
 */
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	/* DSC on external DP requires FEC; eDP uses DSC without FEC. */
	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
	if (INTEL_GEN(dev_priv) >= 12)
		dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
	else
		dsc_max_bpc = min_t(u8, 10,
				    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);

	/* Min Input BPC for ICL+ is 8 */
	if (pipe_bpp < 8 * 3) {
		drm_dbg_kms(&dev_priv->drm,
			    "No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		/*
		 * eDP: compressed bpp comes straight from the panel's DPCD
		 * (6.4 fixed point, hence the >> 4), clamped to pipe_bpp.
		 */
		pipe_config->dsc.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		/* DP: derive compressed bpp and slice count from link + mode. */
		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(dev_priv,
						    pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay,
						    pipe_config->bigjoiner);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay,
						     pipe_config->bigjoiner);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc.compressed_bpp = min_t(u16,
							       dsc_max_output_bpp >> 4,
							       pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq ||
	    pipe_config->bigjoiner) {
		if (pipe_config->dsc.slice_count < 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}

		pipe_config->dsc.dsc_split = true;
	}

	ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d "
			    "Compressed BPP = %d\n",
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = %d Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    pipe_config->dsc.compressed_bpp,
		    pipe_config->dsc.slice_count);

	return 0;
}
2087
/*
 * Top-level DP link configuration: establish the search limits (clock,
 * lane count, bpp), try an uncompressed config ("fast" for eDP, "wide"
 * otherwise), and fall back to DSC when the mode doesn't fit, DSC is
 * forced, or big joiner is in use.
 *
 * Returns 0 on success or a negative error code.
 */
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	drm_WARN_ON(encoder->base.dev, common_len <= 0);

	/* Limits are indices into common_rates[] plus lane count and bpp. */
	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
	limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);

	if (intel_dp->use_max_params) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of in case the initial fast
		 * optimal params failed us. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically on older panels these
		 * values correspond to the native resolution of the panel.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
		    "max rate %d max bpp %d pixel clock %iKHz\n",
		    limits.max_lane_count,
		    intel_dp->common_rates[limits.max_clock],
		    limits.max_bpp, adjusted_mode->crtc_clock);

	/* Wide or fast modes need two pipes joined to drive one port. */
	if ((adjusted_mode->crtc_clock > i915->max_dotclk_freq ||
	     adjusted_mode->crtc_hdisplay > 5120) &&
	    intel_dp_can_bigjoiner(intel_dp))
		pipe_config->bigjoiner = true;

	if (intel_dp_is_edp(intel_dp))
		/*
		 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
		 * section A.1: "It is recommended that the minimum number of
		 * lanes be used, using the minimum link rate allowed for that
		 * lane configuration."
		 *
		 * Note that we fall back to the max clock and lane count for eDP
		 * panels that fail with the fast optimal settings (see
		 * intel_dp->use_max_params), in which case the fast vs. wide
		 * choice doesn't matter.
		 */
		ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
	else
		/* Optimize for slow and wide. */
		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);

	/* enable compression if the mode doesn't fit available BW */
	drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en || pipe_config->bigjoiner) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	/* Log the chosen config, compressed or not. */
	if (pipe_config->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm,
			    "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->dsc.compressed_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	} else {
		drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->pipe_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	}
	return 0;
}
2195
2196 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2197                                   const struct drm_connector_state *conn_state)
2198 {
2199         const struct intel_digital_connector_state *intel_conn_state =
2200                 to_intel_digital_connector_state(conn_state);
2201         const struct drm_display_mode *adjusted_mode =
2202                 &crtc_state->hw.adjusted_mode;
2203
2204         /*
2205          * Our YCbCr output is always limited range.
2206          * crtc_state->limited_color_range only applies to RGB,
2207          * and it must never be set for YCbCr or we risk setting
2208          * some conflicting bits in PIPECONF which will mess up
2209          * the colors on the monitor.
2210          */
2211         if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
2212                 return false;
2213
2214         if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2215                 /*
2216                  * See:
2217                  * CEA-861-E - 5.1 Default Encoding Parameters
2218                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2219                  */
2220                 return crtc_state->pipe_bpp != 18 &&
2221                         drm_default_rgb_quant_range(adjusted_mode) ==
2222                         HDMI_QUANTIZATION_RANGE_LIMITED;
2223         } else {
2224                 return intel_conn_state->broadcast_rgb ==
2225                         INTEL_BROADCAST_RGB_LIMITED;
2226         }
2227 }
2228
2229 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
2230                                     enum port port)
2231 {
2232         if (IS_G4X(dev_priv))
2233                 return false;
2234         if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
2235                 return false;
2236
2237         return true;
2238 }
2239
/*
 * Fill out the pixel encoding / colorimetry fields of a VSC SDP from the
 * crtc state's output format and the connector's Colorspace property,
 * per the DP 1.4a SDP layout.
 */
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
	 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
	 * Colorimetry Format indication.
	 */
	vsc->revision = 0x5;
	vsc->length = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	/* Map the connector Colorspace property to the DP colorimetry code. */
	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		else
			vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
		break;
	}

	vsc->bpc = crtc_state->pipe_bpp / 3;

	/* only RGB pixelformat supports 6 bpc */
	drm_WARN_ON(&dev_priv->drm,
		    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

	/* all YCbCr are always limited range */
	vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
	vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}
2319
2320 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
2321                                      struct intel_crtc_state *crtc_state,
2322                                      const struct drm_connector_state *conn_state)
2323 {
2324         struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
2325
2326         /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
2327         if (crtc_state->has_psr)
2328                 return;
2329
2330         if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
2331                 return;
2332
2333         crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
2334         vsc->sdp_type = DP_SDP_VSC;
2335         intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
2336                                          &crtc_state->infoframes.vsc);
2337 }
2338
2339 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
2340                                   const struct intel_crtc_state *crtc_state,
2341                                   const struct drm_connector_state *conn_state,
2342                                   struct drm_dp_vsc_sdp *vsc)
2343 {
2344         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2345
2346         vsc->sdp_type = DP_SDP_VSC;
2347
2348         if (dev_priv->psr.psr2_enabled) {
2349                 if (dev_priv->psr.colorimetry_support &&
2350                     intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
2351                         /* [PSR2, +Colorimetry] */
2352                         intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
2353                                                          vsc);
2354                 } else {
2355                         /*
2356                          * [PSR2, -Colorimetry]
2357                          * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
2358                          * 3D stereo + PSR/PSR2 + Y-coordinate.
2359                          */
2360                         vsc->revision = 0x4;
2361                         vsc->length = 0xe;
2362                 }
2363         } else {
2364                 /*
2365                  * [PSR1]
2366                  * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
2367                  * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
2368                  * higher).
2369                  */
2370                 vsc->revision = 0x2;
2371                 vsc->length = 0x8;
2372         }
2373 }
2374
2375 static void
2376 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
2377                                             struct intel_crtc_state *crtc_state,
2378                                             const struct drm_connector_state *conn_state)
2379 {
2380         int ret;
2381         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2382         struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
2383
2384         if (!conn_state->hdr_output_metadata)
2385                 return;
2386
2387         ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
2388
2389         if (ret) {
2390                 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
2391                 return;
2392         }
2393
2394         crtc_state->infoframes.enable |=
2395                 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
2396 }
2397
2398 static void
2399 intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
2400                              struct intel_crtc_state *pipe_config,
2401                              int output_bpp, bool constant_n)
2402 {
2403         struct intel_connector *intel_connector = intel_dp->attached_connector;
2404         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2405
2406         /*
2407          * DRRS and PSR can't be enable together, so giving preference to PSR
2408          * as it allows more power-savings by complete shutting down display,
2409          * so to guarantee this, intel_dp_drrs_compute_config() must be called
2410          * after intel_psr_compute_config().
2411          */
2412         if (pipe_config->has_psr)
2413                 return;
2414
2415         if (!intel_connector->panel.downclock_mode ||
2416             dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
2417                 return;
2418
2419         pipe_config->has_drrs = true;
2420         intel_link_compute_m_n(output_bpp, pipe_config->lane_count,
2421                                intel_connector->panel.downclock_mode->clock,
2422                                pipe_config->port_clock, &pipe_config->dp_m2_n2,
2423                                constant_n, pipe_config->fec_enable);
2424 }
2425
/*
 * intel_dp_compute_config - compute the DP crtc state for a modeset
 * @encoder: DP/eDP encoder being configured
 * @pipe_config: crtc state to fill in
 * @conn_state: connector state for this modeset
 *
 * Validates the adjusted mode against DP/platform limits, computes the
 * link configuration (lane count, link rate, output bpp, optional DSC),
 * the data/link M/N values, and the PSR/DRRS/SDP infoframe state.
 *
 * Returns 0 on success or a negative error code if the mode can't be
 * supported.
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
					   DP_DPCD_QUIRK_CONSTANT_N);
	int ret = 0, output_bpp;

	/* Pre-DDI PCH platforms drive all ports except DP-A through the PCH */
	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->output_format = intel_dp_output_format(&intel_connector->base,
							    adjusted_mode);

	/* YCbCr 4:2:0 output goes through the panel fitter path */
	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	/* Resolve audio: port capability first, then the connector property */
	if (!intel_dp_port_has_audio(dev_priv, port))
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	/* eDP always runs at the panel's fixed mode, scaled by a fitter */
	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (HAS_GMCH(dev_priv))
			ret = intel_gmch_panel_fitting(pipe_config, conn_state);
		else
			ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	/* Reject mode flags DP can't do on this hardware */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	/* With DSC the link carries the compressed bpp, not the pipe bpp */
	if (pipe_config->dsc.compression_enable)
		output_bpp = pipe_config->dsc.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config->output_format,
						 pipe_config->pipe_bpp);

	intel_link_compute_m_n(output_bpp,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       constant_n, pipe_config->fec_enable);

	if (!HAS_DDI(dev_priv))
		intel_dp_set_clock(encoder, pipe_config);

	/*
	 * PSR must be computed before DRRS: intel_dp_drrs_compute_config()
	 * bails out when the crtc state has PSR enabled.
	 */
	intel_psr_compute_config(intel_dp, pipe_config);
	intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
				     constant_n);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return 0;
}
2517
2518 void intel_dp_set_link_params(struct intel_dp *intel_dp,
2519                               int link_rate, int lane_count)
2520 {
2521         intel_dp->link_trained = false;
2522         intel_dp->link_rate = link_rate;
2523         intel_dp->lane_count = lane_count;
2524 }
2525
/*
 * Program the DP port register (and, for CPT, TRANS_DP_CTL) with the
 * static per-mode configuration: default voltage/pre-emphasis, lane
 * count, sync polarities, enhanced framing and pipe select. The port
 * enable bit itself is not set here.
 */
static void intel_dp_prepare(struct intel_encoder *encoder,
			     const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;

	intel_dp_set_link_params(intel_dp,
				 pipe_config->port_clock,
				 pipe_config->lane_count);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ilk_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		/* IVB CPU eDP: polarity and pipe select live in the port register */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		/* CPT PCH port: only enhanced framing is updated here, via TRANS_DP_CTL */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU / CHV style register layout */
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
		else
			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
	}
}
2608
2609
2610 /* Enable backlight PWM and backlight PP control. */
2611 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2612                             const struct drm_connector_state *conn_state)
2613 {
2614         struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
2615         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2616
2617         if (!intel_dp_is_edp(intel_dp))
2618                 return;
2619
2620         drm_dbg_kms(&i915->drm, "\n");
2621
2622         intel_panel_enable_backlight(crtc_state, conn_state);
2623         intel_pps_backlight_on(intel_dp);
2624 }
2625
2626 /* Disable backlight PP control and backlight PWM. */
2627 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2628 {
2629         struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
2630         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2631
2632         if (!intel_dp_is_edp(intel_dp))
2633                 return;
2634
2635         drm_dbg_kms(&i915->drm, "\n");
2636
2637         intel_pps_backlight_off(intel_dp);
2638         intel_panel_disable_backlight(old_conn_state);
2639 }
2640
2641 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2642 {
2643         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2644         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2645         bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;
2646
2647         I915_STATE_WARN(cur_state != state,
2648                         "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
2649                         dig_port->base.base.base.id, dig_port->base.base.name,
2650                         onoff(state), onoff(cur_state));
2651 }
2652 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2653
2654 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2655 {
2656         bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;
2657
2658         I915_STATE_WARN(cur_state != state,
2659                         "eDP PLL state assertion failure (expected %s, current %s)\n",
2660                         onoff(state), onoff(cur_state));
2661 }
2662 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2663 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2664
/*
 * Enable the CPU eDP PLL via DP_A: select the PLL frequency matching
 * the configured port clock, then set the enable bit. Asserts that the
 * pipe, the DP port and the PLL are all still disabled on entry.
 */
static void ilk_edp_pll_on(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
		    pipe_config->port_clock);

	/* Select the PLL frequency before enabling the PLL */
	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN(dev_priv, 5))
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(200);
}
2704
/*
 * Disable the CPU eDP PLL via DP_A. Asserts that the pipe and the DP
 * port are already disabled and the PLL is currently enabled.
 */
static void ilk_edp_pll_off(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(200);
}
2723
2724 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
2725 {
2726         /*
2727          * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
2728          * be capable of signalling downstream hpd with a long pulse.
2729          * Whether or not that means D3 is safe to use is not clear,
2730          * but let's assume so until proven otherwise.
2731          *
2732          * FIXME should really check all downstream ports...
2733          */
2734         return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
2735                 drm_dp_is_branch(intel_dp->dpcd) &&
2736                 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
2737 }
2738
2739 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
2740                                            const struct intel_crtc_state *crtc_state,
2741                                            bool enable)
2742 {
2743         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2744         int ret;
2745
2746         if (!crtc_state->dsc.compression_enable)
2747                 return;
2748
2749         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
2750                                  enable ? DP_DECOMPRESSION_EN : 0);
2751         if (ret < 0)
2752                 drm_dbg_kms(&i915->drm,
2753                             "Failed to %s sink decompression state\n",
2754                             enable ? "enable" : "disable");
2755 }
2756
2757 static void
2758 intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
2759 {
2760         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2761         u8 oui[] = { 0x00, 0xaa, 0x01 };
2762         u8 buf[3] = { 0 };
2763
2764         /*
2765          * During driver init, we want to be careful and avoid changing the source OUI if it's
2766          * already set to what we want, so as to avoid clearing any state by accident
2767          */
2768         if (careful) {
2769                 if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
2770                         drm_err(&i915->drm, "Failed to read source OUI\n");
2771
2772                 if (memcmp(oui, buf, sizeof(oui)) == 0)
2773                         return;
2774         }
2775
2776         if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
2777                 drm_err(&i915->drm, "Failed to write source OUI\n");
2778 }
2779
/*
 * intel_dp_set_power - set the sink's DPCD power state
 * @intel_dp: DP sink
 * @mode: DP_SET_POWER_D0 to wake the sink, otherwise to sleep it
 *
 * If the device supports it, try to set the power state appropriately.
 * A non-D0 request is skipped when we rely on the branch device staying
 * in D0 for downstream hpd. A D0 request additionally resumes the
 * LSPCON and (on eDP) rewrites the source OUI, retrying the DPCD write
 * a few times while the sink wakes up.
 */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DP_SET_POWER_D0) {
		/* Keep the sink awake if it has to signal downstream hpd */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		lspcon_resume(dp_to_dig_port(intel_dp));

		/* Write the source OUI as early as possible */
		if (intel_dp_is_edp(intel_dp))
			intel_edp_init_source_oui(intel_dp, false);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	if (ret != 1)
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
			    encoder->base.base.id, encoder->base.name,
			    mode == DP_SET_POWER_D0 ? "D0" : "D3");
}
2825
2826 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
2827                                  enum port port, enum pipe *pipe)
2828 {
2829         enum pipe p;
2830
2831         for_each_pipe(dev_priv, p) {
2832                 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
2833
2834                 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
2835                         *pipe = p;
2836                         return true;
2837                 }
2838         }
2839
2840         drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
2841                     port_name(port));
2842
2843         /* must initialize pipe to something for the asserts */
2844         *pipe = PIPE_A;
2845
2846         return false;
2847 }
2848
2849 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
2850                            i915_reg_t dp_reg, enum port port,
2851                            enum pipe *pipe)
2852 {
2853         bool ret;
2854         u32 val;
2855
2856         val = intel_de_read(dev_priv, dp_reg);
2857
2858         ret = val & DP_PORT_EN;
2859
2860         /* asserts want to know the pipe even if the port is disabled */
2861         if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
2862                 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
2863         else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
2864                 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
2865         else if (IS_CHERRYVIEW(dev_priv))
2866                 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
2867         else
2868                 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
2869
2870         return ret;
2871 }
2872
2873 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2874                                   enum pipe *pipe)
2875 {
2876         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2877         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2878         intel_wakeref_t wakeref;
2879         bool ret;
2880
2881         wakeref = intel_display_power_get_if_enabled(dev_priv,
2882                                                      encoder->power_domain);
2883         if (!wakeref)
2884                 return false;
2885
2886         ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
2887                                     encoder->port, pipe);
2888
2889         intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
2890
2891         return ret;
2892 }
2893
/*
 * Encoder ->get_config hook: read the current DP configuration back
 * from the hardware into @pipe_config - output type, audio, sync
 * polarities, color range, lane count, M/N values and port clock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	u32 tmp, flags = 0;
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);

	if (encoder->type == INTEL_OUTPUT_EDP)
		pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
	else
		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);

	tmp = intel_de_read(dev_priv, intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* On CPT PCH ports the sync polarities live in TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp = intel_de_read(dev_priv,
					     TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->hw.adjusted_mode.flags |= flags;

	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* CPU eDP (port A): derive the port clock from the eDP PLL frequency */
	if (port == PORT_A) {
		if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			    pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}
2979
2980 static bool
2981 intel_dp_get_dpcd(struct intel_dp *intel_dp);
2982
2983 /**
2984  * intel_dp_sync_state - sync the encoder state during init/resume
2985  * @encoder: intel encoder to sync
2986  * @crtc_state: state for the CRTC connected to the encoder
2987  *
2988  * Sync any state stored in the encoder wrt. HW state during driver init
2989  * and system resume.
2990  */
2991 void intel_dp_sync_state(struct intel_encoder *encoder,
2992                          const struct intel_crtc_state *crtc_state)
2993 {
2994         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2995
2996         /*
2997          * Don't clobber DPCD if it's been already read out during output
2998          * setup (eDP) or detect.
2999          */
3000         if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3001                 intel_dp_get_dpcd(intel_dp);
3002
3003         intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
3004         intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
3005 }
3006
/*
 * Check whether the BIOS-programmed state allows a fastset on this encoder.
 * Returns false (after flagging the relevant uapi state as changed, which
 * forces encoder recompute / full modeset) when the inherited state cannot
 * be trusted; returns true when a fastset is acceptable.
 */
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
                                    struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

        /*
         * If BIOS has set an unsupported or non-standard link rate for some
         * reason force an encoder recompute and full modeset.
         */
        if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
                                crtc_state->port_clock) < 0) {
                drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n");
                crtc_state->uapi.connectors_changed = true;
                return false;
        }

        /*
         * FIXME hack to force full modeset when DSC is being used.
         *
         * As long as we do not have full state readout and config comparison
         * of crtc_state->dsc, we have no way to ensure reliable fastset.
         * Remove once we have readout for DSC.
         */
        if (crtc_state->dsc.compression_enable) {
                drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n");
                crtc_state->uapi.mode_changed = true;
                return false;
        }

        /* PSR state cannot be read out either; recompute when PSR is possible. */
        if (CAN_PSR(i915) && intel_dp_is_edp(intel_dp)) {
                drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
                crtc_state->uapi.mode_changed = true;
                return false;
        }

        return true;
}
3045
/*
 * Common DP disable path: tear down audio, backlight and panel power, and
 * drop cached link/FRL training state so the next enable retrains from
 * scratch.
 */
static void intel_disable_dp(struct intel_atomic_state *state,
                             struct intel_encoder *encoder,
                             const struct intel_crtc_state *old_crtc_state,
                             const struct drm_connector_state *old_conn_state)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

        intel_dp->link_trained = false;

        if (old_crtc_state->has_audio)
                intel_audio_codec_disable(encoder,
                                          old_crtc_state, old_conn_state);

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        intel_pps_vdd_on(intel_dp);
        intel_edp_backlight_off(old_conn_state);
        intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
        intel_pps_off(intel_dp);
        /* Any FRL training done on a PCON's HDMI link is no longer valid. */
        intel_dp->frl.is_trained = false;
        intel_dp->frl.trained_rate_gbps = 0;
}
3068
/* g4x ->disable hook: nothing platform specific beyond the common disable. */
static void g4x_disable_dp(struct intel_atomic_state *state,
                           struct intel_encoder *encoder,
                           const struct intel_crtc_state *old_crtc_state,
                           const struct drm_connector_state *old_conn_state)
{
        intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}
3076
/* VLV/CHV ->disable hook: nothing platform specific beyond the common disable. */
static void vlv_disable_dp(struct intel_atomic_state *state,
                           struct intel_encoder *encoder,
                           const struct intel_crtc_state *old_crtc_state,
                           const struct drm_connector_state *old_conn_state)
{
        intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}
3084
/*
 * g4x ->post_disable hook: take the link down after the pipe is off, and
 * shut down the eDP PLL on port A.
 */
static void g4x_post_disable_dp(struct intel_atomic_state *state,
                                struct intel_encoder *encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        enum port port = encoder->port;

        /*
         * Bspec does not list a specific disable sequence for g4x DP.
         * Follow the ilk+ sequence (disable pipe before the port) for
         * g4x DP as it does not suffer from underruns like the normal
         * g4x modeset sequence (disable pipe after the port).
         */
        intel_dp_link_down(encoder, old_crtc_state);

        /* Only ilk+ has port A */
        if (port == PORT_A)
                ilk_edp_pll_off(intel_dp, old_crtc_state);
}
3105
/* VLV ->post_disable hook: just take the link down after the pipe is off. */
static void vlv_post_disable_dp(struct intel_atomic_state *state,
                                struct intel_encoder *encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state)
{
        intel_dp_link_down(encoder, old_crtc_state);
}
3113
/*
 * CHV ->post_disable hook: take the link down, then put the PHY data lanes
 * into reset via sideband (DPIO) access.
 */
static void chv_post_disable_dp(struct intel_atomic_state *state,
                                struct intel_encoder *encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

        intel_dp_link_down(encoder, old_crtc_state);

        vlv_dpio_get(dev_priv);

        /* Assert data lane reset */
        chv_data_lane_soft_reset(encoder, old_crtc_state, true);

        vlv_dpio_put(dev_priv);
}
3130
/*
 * Program the link training pattern bits of the CPT/PPT (PCH) DP port
 * register, updating the cached register value (intel_dp->DP) and writing
 * it out with a posting read.
 */
static void
cpt_set_link_train(struct intel_dp *intel_dp,
                   const struct intel_crtc_state *crtc_state,
                   u8 dp_train_pat)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 *DP = &intel_dp->DP;

        *DP &= ~DP_LINK_TRAIN_MASK_CPT;

        switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
        case DP_TRAINING_PATTERN_DISABLE:
                *DP |= DP_LINK_TRAIN_OFF_CPT;
                break;
        case DP_TRAINING_PATTERN_1:
                *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                break;
        case DP_TRAINING_PATTERN_2:
                *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                break;
        case DP_TRAINING_PATTERN_3:
                /* CPT hardware has no TPS3 support; TPS2 is the best we can do. */
                drm_dbg_kms(&dev_priv->drm,
                            "TPS3 not supported, using TPS2 instead\n");
                *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                break;
        }

        intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
        intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
3161
/*
 * Read the PCON's DSC encoder capability DPCD registers into the cached
 * intel_dp->pcon_dsc_dpcd buffer (zeroed first so a failed read doesn't
 * leave stale values behind).
 */
static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);

        /* Clear the cached register set to avoid using stale values */

        memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));

        if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
                             intel_dp->pcon_dsc_dpcd,
                             sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
                drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
                        DP_PCON_DSC_ENCODER);

        drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
                    (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
}
3179
3180 static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
3181 {
3182         int bw_gbps[] = {9, 18, 24, 32, 40, 48};
3183         int i;
3184
3185         for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
3186                 if (frl_bw_mask & (1 << i))
3187                         return bw_gbps[i];
3188         }
3189         return 0;
3190 }
3191
3192 static int intel_dp_pcon_set_frl_mask(int max_frl)
3193 {
3194         switch (max_frl) {
3195         case 48:
3196                 return DP_PCON_FRL_BW_MASK_48GBPS;
3197         case 40:
3198                 return DP_PCON_FRL_BW_MASK_40GBPS;
3199         case 32:
3200                 return DP_PCON_FRL_BW_MASK_32GBPS;
3201         case 24:
3202                 return DP_PCON_FRL_BW_MASK_24GBPS;
3203         case 18:
3204                 return DP_PCON_FRL_BW_MASK_18GBPS;
3205         case 9:
3206                 return DP_PCON_FRL_BW_MASK_9GBPS;
3207         }
3208
3209         return 0;
3210 }
3211
/*
 * Max FRL bandwidth (in Gbps) the HDMI sink accepts per its EDID:
 * lanes * rate-per-lane, further capped by the HDMI 2.1 DSC 1.2
 * capability when the sink advertises one.
 */
static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
{
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        struct drm_connector *connector = &intel_connector->base;
        int max_frl_rate;
        int max_lanes, rate_per_lane;
        int max_dsc_lanes, dsc_rate_per_lane;

        max_lanes = connector->display_info.hdmi.max_lanes;
        rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
        max_frl_rate = max_lanes * rate_per_lane;

        if (connector->display_info.hdmi.dsc_cap.v_1p2) {
                max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
                dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
                /* Only cap when the DSC lane/rate caps are actually advertised. */
                if (max_dsc_lanes && dsc_rate_per_lane)
                        max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
        }

        return max_frl_rate;
}
3233
3234 static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
3235 {
3236 #define PCON_EXTENDED_TRAIN_MODE (1 > 0)
3237 #define PCON_CONCURRENT_MODE (1 > 0)
3238 #define PCON_SEQUENTIAL_MODE !PCON_CONCURRENT_MODE
3239 #define PCON_NORMAL_TRAIN_MODE !PCON_EXTENDED_TRAIN_MODE
3240 #define TIMEOUT_FRL_READY_MS 500
3241 #define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000
3242
3243         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3244         int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
3245         u8 max_frl_bw_mask = 0, frl_trained_mask;
3246         bool is_active;
3247
3248         ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
3249         if (ret < 0)
3250                 return ret;
3251
3252         max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
3253         drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
3254
3255         max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
3256         drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);
3257
3258         max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);
3259
3260         if (max_frl_bw <= 0)
3261                 return -EINVAL;
3262
3263         ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
3264         if (ret < 0)
3265                 return ret;
3266         /* Wait for PCON to be FRL Ready */
3267         wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);
3268
3269         if (!is_active)
3270                 return -ETIMEDOUT;
3271
3272         max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
3273         ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, PCON_SEQUENTIAL_MODE);
3274         if (ret < 0)
3275                 return ret;
3276         ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, PCON_NORMAL_TRAIN_MODE);
3277         if (ret < 0)
3278                 return ret;
3279         ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
3280         if (ret < 0)
3281                 return ret;
3282         /*
3283          * Wait for FRL to be completed
3284          * Check if the HDMI Link is up and active.
3285          */
3286         wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS);
3287
3288         if (!is_active)
3289                 return -ETIMEDOUT;
3290
3291         /* Verify HDMI Link configuration shows FRL Mode */
3292         if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
3293             DP_PCON_HDMI_MODE_FRL) {
3294                 drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
3295                 return -EINVAL;
3296         }
3297         drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);
3298
3299         intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
3300         intel_dp->frl.is_trained = true;
3301         drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);
3302
3303         return 0;
3304 }
3305
3306 static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
3307 {
3308         if (drm_dp_is_branch(intel_dp->dpcd) &&
3309             intel_dp->has_hdmi_sink &&
3310             intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
3311                 return true;
3312
3313         return false;
3314 }
3315
3316 void intel_dp_check_frl_training(struct intel_dp *intel_dp)
3317 {
3318         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3319
3320         /* Always go for FRL training if supported */
3321         if (!intel_dp_is_hdmi_2_1_sink(intel_dp) ||
3322             intel_dp->frl.is_trained)
3323                 return;
3324
3325         if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
3326                 int ret, mode;
3327
3328                 drm_dbg(&dev_priv->drm, "Couldnt set FRL mode, continuing with TMDS mode\n");
3329                 ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
3330                 mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
3331
3332                 if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
3333                         drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
3334         } else {
3335                 drm_dbg(&dev_priv->drm, "FRL training Completed\n");
3336         }
3337 }
3338
/* DSC slice height for the PCON encoder, derived from the active vdisplay. */
static int
intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
{
        int vactive = crtc_state->hw.adjusted_mode.vdisplay;

        return intel_hdmi_dsc_get_slice_height(vactive);
}
3346
/*
 * Number of DSC slices for the PCON encoder, reconciling the PCON's DPCD
 * limits with the HDMI sink's advertised DSC capabilities.
 */
static int
intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
                             const struct intel_crtc_state *crtc_state)
{
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        struct drm_connector *connector = &intel_connector->base;
        int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
        int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
        int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
        int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);

        return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
                                             pcon_max_slice_width,
                                             hdmi_max_slices, hdmi_throughput);
}
3362
/*
 * DSC bits-per-pixel for the PCON encoder, combining the PCON's fractional
 * bpp support with the HDMI sink's DSC chunk-size and all-bpp capabilities.
 */
static int
intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
                          const struct intel_crtc_state *crtc_state,
                          int num_slices, int slice_width)
{
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        struct drm_connector *connector = &intel_connector->base;
        int output_format = crtc_state->output_format;
        bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
        int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
        int hdmi_max_chunk_bytes =
                connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;

        return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
                                      num_slices, output_format, hdmi_all_bpp,
                                      hdmi_max_chunk_bytes);
}
3380
/*
 * Compute DSC PPS parameters (slice height/width, slice count, bpp) for the
 * PCON's HDMI-side DSC encoder and program them via the DPCD PPS override.
 * No-op unless the sink is HDMI 2.1 behind a DSC 1.2 capable PCON, or if
 * any of the derived parameters comes out zero.
 */
void
intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
                            const struct intel_crtc_state *crtc_state)
{
        u8 pps_param[6];
        int slice_height;
        int slice_width;
        int num_slices;
        int bits_per_pixel;
        int ret;
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        struct drm_connector *connector;
        bool hdmi_is_dsc_1_2;

        if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
                return;

        if (!intel_connector)
                return;
        connector = &intel_connector->base;
        hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;

        /* Both the PCON encoder and the HDMI sink must support DSC 1.2. */
        if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
            !hdmi_is_dsc_1_2)
                return;

        slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
        if (!slice_height)
                return;

        num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
        if (!num_slices)
                return;

        slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
                                   num_slices);

        bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
                                                   num_slices, slice_width);
        if (!bits_per_pixel)
                return;

        /* Pack the PPS override parameters little-endian, bpp is 10 bits. */
        pps_param[0] = slice_height & 0xFF;
        pps_param[1] = slice_height >> 8;
        pps_param[2] = slice_width & 0xFF;
        pps_param[3] = slice_width >> 8;
        pps_param[4] = bits_per_pixel & 0xFF;
        pps_param[5] = (bits_per_pixel >> 8) & 0x3;

        ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
        if (ret < 0)
                drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
}
3435
/*
 * Program the link training pattern bits of the g4x DP port register,
 * updating the cached register value (intel_dp->DP) and writing it out
 * with a posting read. CPT/PPT variants live in cpt_set_link_train().
 */
static void
g4x_set_link_train(struct intel_dp *intel_dp,
                   const struct intel_crtc_state *crtc_state,
                   u8 dp_train_pat)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 *DP = &intel_dp->DP;

        *DP &= ~DP_LINK_TRAIN_MASK;

        switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
        case DP_TRAINING_PATTERN_DISABLE:
                *DP |= DP_LINK_TRAIN_OFF;
                break;
        case DP_TRAINING_PATTERN_1:
                *DP |= DP_LINK_TRAIN_PAT_1;
                break;
        case DP_TRAINING_PATTERN_2:
                *DP |= DP_LINK_TRAIN_PAT_2;
                break;
        case DP_TRAINING_PATTERN_3:
                /* g4x hardware has no TPS3 support; TPS2 is the best we can do. */
                drm_dbg_kms(&dev_priv->drm,
                            "TPS3 not supported, using TPS2 instead\n");
                *DP |= DP_LINK_TRAIN_PAT_2;
                break;
        }

        intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
        intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
3466
/*
 * Enable the DP port with training pattern 1 programmed, following the
 * two-write dance required by VLV/CHV. Do not reorder these writes.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp,
                                 const struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        /* enable with pattern 1 (as per spec) */

        intel_dp_program_link_training_pattern(intel_dp, crtc_state,
                                               DP_TRAINING_PATTERN_1);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;
        if (crtc_state->has_audio)
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

        intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
        intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
3490
/*
 * Configure a DP branch device (protocol converter / PCON): select HDMI vs
 * DVI output mode, YCbCr 4:4:4 -> 4:2:0 conversion, and RGB -> YCbCr
 * conversion colorimetry. Requires DPCD rev 1.3+ and a branch device;
 * failures are logged but not fatal.
 */
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
                                           const struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 tmp;

        /* The protocol converter DPCD registers exist from DPCD rev 1.3. */
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
                return;

        if (!drm_dp_is_branch(intel_dp->dpcd))
                return;

        tmp = intel_dp->has_hdmi_sink ?
                DP_HDMI_DVI_OUTPUT_CONFIG : 0;

        if (drm_dp_dpcd_writeb(&intel_dp->aux,
                               DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
                drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
                            enableddisabled(intel_dp->has_hdmi_sink));

        tmp = intel_dp->dfp.ycbcr_444_to_420 ?
                DP_CONVERSION_TO_YCBCR420_ENABLE : 0;

        if (drm_dp_dpcd_writeb(&intel_dp->aux,
                               DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
                drm_dbg_kms(&i915->drm,
                            "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n",
                            enableddisabled(intel_dp->dfp.ycbcr_444_to_420));

        tmp = 0;
        if (intel_dp->dfp.rgb_to_ycbcr) {
                bool bt2020, bt709;

                /*
                 * FIXME: Currently if userspace selects BT2020 or BT709, but PCON supports only
                 * RGB->YCbCr for BT601 colorspace, we go ahead with BT601, as default.
                 *
                 */
                tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE;

                bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
                                                                   intel_dp->downstream_ports,
                                                                   DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
                bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
                                                                  intel_dp->downstream_ports,
                                                                  DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
                /* Upgrade from the BT601 default when the PCON supports it. */
                switch (crtc_state->infoframes.vsc.colorimetry) {
                case DP_COLORIMETRY_BT2020_RGB:
                case DP_COLORIMETRY_BT2020_YCC:
                        if (bt2020)
                                tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE;
                        break;
                case DP_COLORIMETRY_BT709_YCC:
                case DP_COLORIMETRY_XVYCC_709:
                        if (bt709)
                                tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE;
                        break;
                default:
                        break;
                }
        }

        if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
                drm_dbg_kms(&i915->drm,
                           "Failed to set protocol converter RGB->YCbCr conversion mode to %s\n",
                           enableddisabled(tmp ? true : false));
}
3558
/*
 * Common pre-gen5 DP enable: bring up the port under the PPS lock (with the
 * VLV/CHV power sequencer set up first), wait for the PHY on VLV/CHV, then
 * power up the sink, configure any protocol converter, link train and
 * finally enable audio.
 */
static void intel_enable_dp(struct intel_atomic_state *state,
                            struct intel_encoder *encoder,
                            const struct intel_crtc_state *pipe_config,
                            const struct drm_connector_state *conn_state)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
        u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
        enum pipe pipe = crtc->pipe;
        intel_wakeref_t wakeref;

        /* The port must not already be enabled at this point. */
        if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
                return;

        with_intel_pps_lock(intel_dp, wakeref) {
                /* VLV/CHV need the power sequencer set up before the port. */
                if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                        vlv_pps_init(encoder, pipe_config);

                intel_dp_enable_port(intel_dp, pipe_config);

                intel_pps_vdd_on_unlocked(intel_dp);
                intel_pps_on_unlocked(intel_dp);
                intel_pps_vdd_off_unlocked(intel_dp, true);
        }

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                unsigned int lane_mask = 0x0;

                if (IS_CHERRYVIEW(dev_priv))
                        lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

                vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
                                    lane_mask);
        }

        intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
        intel_dp_configure_protocol_converter(intel_dp, pipe_config);
        intel_dp_check_frl_training(intel_dp);
        intel_dp_pcon_dsc_configure(intel_dp, pipe_config);
        intel_dp_start_link_train(intel_dp, pipe_config);
        intel_dp_stop_link_train(intel_dp, pipe_config);

        if (pipe_config->has_audio) {
                drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
                        pipe_name(pipe));
                intel_audio_codec_enable(encoder, pipe_config, conn_state);
        }
}
3608
/* g4x ->enable hook: common enable, then turn on the eDP backlight. */
static void g4x_enable_dp(struct intel_atomic_state *state,
                          struct intel_encoder *encoder,
                          const struct intel_crtc_state *pipe_config,
                          const struct drm_connector_state *conn_state)
{
        intel_enable_dp(state, encoder, pipe_config, conn_state);
        intel_edp_backlight_on(pipe_config, conn_state);
}
3617
/*
 * VLV/CHV ->enable hook: the port itself is brought up in the ->pre_enable
 * hooks (which call intel_enable_dp()); only the backlight remains here.
 */
static void vlv_enable_dp(struct intel_atomic_state *state,
                          struct intel_encoder *encoder,
                          const struct intel_crtc_state *pipe_config,
                          const struct drm_connector_state *conn_state)
{
        intel_edp_backlight_on(pipe_config, conn_state);
}
3625
/* g4x ->pre_enable hook: prepare the port and turn on the eDP PLL (port A). */
static void g4x_pre_enable_dp(struct intel_atomic_state *state,
                              struct intel_encoder *encoder,
                              const struct intel_crtc_state *pipe_config,
                              const struct drm_connector_state *conn_state)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        enum port port = encoder->port;

        intel_dp_prepare(encoder, pipe_config);

        /* Only ilk+ has port A */
        if (port == PORT_A)
                ilk_edp_pll_on(intel_dp, pipe_config);
}
3640
/* VLV ->pre_enable hook: set up the PHY, then run the common enable. */
static void vlv_pre_enable_dp(struct intel_atomic_state *state,
                              struct intel_encoder *encoder,
                              const struct intel_crtc_state *pipe_config,
                              const struct drm_connector_state *conn_state)
{
        vlv_phy_pre_encoder_enable(encoder, pipe_config);

        intel_enable_dp(state, encoder, pipe_config, conn_state);
}
3650
/* VLV ->pre_pll_enable hook: prepare the port, then the PHY, before the PLL. */
static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
                                  struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config,
                                  const struct drm_connector_state *conn_state)
{
        intel_dp_prepare(encoder, pipe_config);

        vlv_phy_pre_pll_enable(encoder, pipe_config);
}
3660
/* CHV ->pre_enable hook: PHY setup, common enable, then drop the CL2 override. */
static void chv_pre_enable_dp(struct intel_atomic_state *state,
                              struct intel_encoder *encoder,
                              const struct intel_crtc_state *pipe_config,
                              const struct drm_connector_state *conn_state)
{
        chv_phy_pre_encoder_enable(encoder, pipe_config);

        intel_enable_dp(state, encoder, pipe_config, conn_state);

        /* Second common lane will stay alive on its own now */
        chv_phy_release_cl2_override(encoder);
}
3673
/* CHV ->pre_pll_enable hook: prepare the port, then the PHY, before the PLL. */
static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
                                  struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config,
                                  const struct drm_connector_state *conn_state)
{
        intel_dp_prepare(encoder, pipe_config);

        chv_phy_pre_pll_enable(encoder, pipe_config);
}
3683
/* CHV ->post_pll_disable hook: finish PHY teardown after the PLL is off. */
static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
                                    struct intel_encoder *encoder,
                                    const struct intel_crtc_state *old_crtc_state,
                                    const struct drm_connector_state *old_conn_state)
{
        chv_phy_post_pll_disable(encoder, old_crtc_state);
}
3691
/* ->voltage_max vfunc: platforms capped at voltage swing level 2. */
static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp,
                                 const struct intel_crtc_state *crtc_state)
{
        return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
3697
/* ->voltage_max vfunc: platforms supporting voltage swing level 3. */
static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp,
                                 const struct intel_crtc_state *crtc_state)
{
        return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}
3703
/* ->preemph_max vfunc: platforms capped at pre-emphasis level 2. */
static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp)
{
        return DP_TRAIN_PRE_EMPH_LEVEL_2;
}
3708
/* ->preemph_max vfunc: platforms supporting pre-emphasis level 3. */
static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
{
        return DP_TRAIN_PRE_EMPH_LEVEL_3;
}
3713
3714 static void vlv_set_signal_levels(struct intel_dp *intel_dp,
3715                                   const struct intel_crtc_state *crtc_state)
3716 {
3717         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3718         unsigned long demph_reg_value, preemph_reg_value,
3719                 uniqtranscale_reg_value;
3720         u8 train_set = intel_dp->train_set[0];
3721
3722         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3723         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3724                 preemph_reg_value = 0x0004000;
3725                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3726                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3727                         demph_reg_value = 0x2B405555;
3728                         uniqtranscale_reg_value = 0x552AB83A;
3729                         break;
3730                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3731                         demph_reg_value = 0x2B404040;
3732                         uniqtranscale_reg_value = 0x5548B83A;
3733                         break;
3734                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3735                         demph_reg_value = 0x2B245555;
3736                         uniqtranscale_reg_value = 0x5560B83A;
3737                         break;
3738                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3739                         demph_reg_value = 0x2B405555;
3740                         uniqtranscale_reg_value = 0x5598DA3A;
3741                         break;
3742                 default:
3743                         return;
3744                 }
3745                 break;
3746         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3747                 preemph_reg_value = 0x0002000;
3748                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3749                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3750                         demph_reg_value = 0x2B404040;
3751                         uniqtranscale_reg_value = 0x5552B83A;
3752                         break;
3753                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3754                         demph_reg_value = 0x2B404848;
3755                         uniqtranscale_reg_value = 0x5580B83A;
3756                         break;
3757                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3758                         demph_reg_value = 0x2B404040;
3759                         uniqtranscale_reg_value = 0x55ADDA3A;
3760                         break;
3761                 default:
3762                         return;
3763                 }
3764                 break;
3765         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3766                 preemph_reg_value = 0x0000000;
3767                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3768                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3769                         demph_reg_value = 0x2B305555;
3770                         uniqtranscale_reg_value = 0x5570B83A;
3771                         break;
3772                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3773                         demph_reg_value = 0x2B2B4040;
3774                         uniqtranscale_reg_value = 0x55ADDA3A;
3775                         break;
3776                 default:
3777                         return;
3778                 }
3779                 break;
3780         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3781                 preemph_reg_value = 0x0006000;
3782                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3783                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3784                         demph_reg_value = 0x1B405555;
3785                         uniqtranscale_reg_value = 0x55ADDA3A;
3786                         break;
3787                 default:
3788                         return;
3789                 }
3790                 break;
3791         default:
3792                 return;
3793         }
3794
3795         vlv_set_phy_signal_level(encoder, crtc_state,
3796                                  demph_reg_value, preemph_reg_value,
3797                                  uniqtranscale_reg_value, 0);
3798 }
3799
/*
 * Translate the requested DP vswing/pre-emphasis levels in train_set[0]
 * into CHV PHY de-emphasis/margin values and program the PHY.
 *
 * The deemph/margin constants are per-level PHY settings (presumably
 * from the CHV PHY tuning tables — confirm against Bspec). Unsupported
 * combinations are silently ignored, leaving the PHY untouched.
 */
static void chv_set_signal_levels(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* Max swing level needs the unique transition scale. */
			uniq_trans_scale = true;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	chv_set_phy_signal_level(encoder, crtc_state,
				 deemph_reg_value, margin_reg_value,
				 uniq_trans_scale);
}
3882
3883 static u32 g4x_signal_levels(u8 train_set)
3884 {
3885         u32 signal_levels = 0;
3886
3887         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3888         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3889         default:
3890                 signal_levels |= DP_VOLTAGE_0_4;
3891                 break;
3892         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3893                 signal_levels |= DP_VOLTAGE_0_6;
3894                 break;
3895         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3896                 signal_levels |= DP_VOLTAGE_0_8;
3897                 break;
3898         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3899                 signal_levels |= DP_VOLTAGE_1_2;
3900                 break;
3901         }
3902         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3903         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3904         default:
3905                 signal_levels |= DP_PRE_EMPHASIS_0;
3906                 break;
3907         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3908                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3909                 break;
3910         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3911                 signal_levels |= DP_PRE_EMPHASIS_6;
3912                 break;
3913         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3914                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3915                 break;
3916         }
3917         return signal_levels;
3918 }
3919
3920 static void
3921 g4x_set_signal_levels(struct intel_dp *intel_dp,
3922                       const struct intel_crtc_state *crtc_state)
3923 {
3924         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3925         u8 train_set = intel_dp->train_set[0];
3926         u32 signal_levels;
3927
3928         signal_levels = g4x_signal_levels(train_set);
3929
3930         drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
3931                     signal_levels);
3932
3933         intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
3934         intel_dp->DP |= signal_levels;
3935
3936         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
3937         intel_de_posting_read(dev_priv, intel_dp->output_reg);
3938 }
3939
/* SNB CPU eDP voltage swing and pre-emphasis control */
/*
 * Map the requested vswing/pre-emphasis combination to the SNB CPU eDP
 * link training register field. Combinations not listed fall back to
 * 400-600mV/0dB with a debug message.
 */
static u32 snb_cpu_edp_signal_levels(u8 train_set)
{
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					DP_TRAIN_PRE_EMPHASIS_MASK);

	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
3967
3968 static void
3969 snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
3970                               const struct intel_crtc_state *crtc_state)
3971 {
3972         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3973         u8 train_set = intel_dp->train_set[0];
3974         u32 signal_levels;
3975
3976         signal_levels = snb_cpu_edp_signal_levels(train_set);
3977
3978         drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
3979                     signal_levels);
3980
3981         intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3982         intel_dp->DP |= signal_levels;
3983
3984         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
3985         intel_de_posting_read(dev_priv, intel_dp->output_reg);
3986 }
3987
/* IVB CPU eDP voltage swing and pre-emphasis control */
/*
 * Map the requested vswing/pre-emphasis combination to the IVB CPU eDP
 * link training register field. Combinations not listed fall back to
 * 500mV/0dB with a debug message (note: not one of the mapped levels —
 * presumably intentional per Bspec, TODO confirm).
 */
static u32 ivb_cpu_edp_signal_levels(u8 train_set)
{
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					DP_TRAIN_PRE_EMPHASIS_MASK);

	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
4019
4020 static void
4021 ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
4022                               const struct intel_crtc_state *crtc_state)
4023 {
4024         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4025         u8 train_set = intel_dp->train_set[0];
4026         u32 signal_levels;
4027
4028         signal_levels = ivb_cpu_edp_signal_levels(train_set);
4029
4030         drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
4031                     signal_levels);
4032
4033         intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
4034         intel_dp->DP |= signal_levels;
4035
4036         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
4037         intel_de_posting_read(dev_priv, intel_dp->output_reg);
4038 }
4039
4040 void
4041 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
4042                                        const struct intel_crtc_state *crtc_state,
4043                                        u8 dp_train_pat)
4044 {
4045         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4046
4047         if ((intel_dp_training_pattern_symbol(dp_train_pat)) !=
4048             DP_TRAINING_PATTERN_DISABLE)
4049                 drm_dbg_kms(&dev_priv->drm,
4050                             "Using DP training pattern TPS%d\n",
4051                             intel_dp_training_pattern_symbol(dp_train_pat));
4052
4053         intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
4054 }
4055
/*
 * Bring the DP link down: idle the link training pattern, then disable
 * the port (and audio), applying the IBX transcoder-A workaround where
 * needed. The exact register write/posting-read ordering is part of the
 * hardware sequence — do not reorder.
 */
static void
intel_dp_link_down(struct intel_encoder *encoder,
		   const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	enum port port = encoder->port;
	u32 DP = intel_dp->DP;

	/* Nothing to do if the port is already disabled. */
	if (drm_WARN_ON(&dev_priv->drm,
			(intel_de_read(dev_priv, intel_dp->output_reg) &
			 DP_PORT_EN) == 0))
		return;

	drm_dbg_kms(&dev_priv->drm, "\n");

	/* First set the link to idle pattern (field layout differs on CPT). */
	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	/* Then disable the port and audio output. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
			DP_LINK_TRAIN_PAT_1;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	/* Let the panel power down before touching it again. */
	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;

	/* On VLV/CHV the port no longer drives any pipe. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t wakeref;

		with_intel_pps_lock(intel_dp, wakeref)
			intel_dp->active_pipe = INVALID_PIPE;
	}
}
4128
4129 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
4130 {
4131         u8 dprx = 0;
4132
4133         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
4134                               &dprx) != 1)
4135                 return false;
4136         return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
4137 }
4138
/*
 * Read and cache the sink's DSC and FEC capabilities from DPCD.
 * Previously cached values are cleared first so sinks without DSC
 * support never use stale data. Read failures are logged but leave the
 * (cleared) caches in place.
 */
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			drm_err(&i915->drm,
				"Failed to read DPCD register 0x%x\n",
				DP_DSC_SUPPORT);

		drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
			    (int)sizeof(intel_dp->dsc_dpcd),
			    intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			drm_err(&i915->drm,
				"Failed to read FEC DPCD register\n");

		drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
			    intel_dp->fec_capable);
	}
}
4177
/*
 * One-time DPCD initialization for eDP panels: read the base DPCD caps,
 * the eDP display control registers, the eDP 1.4+ link rates and (on
 * GLK/GEN10+) the DSC caps, then program the source OUI if needed.
 *
 * Returns false if the base DPCD read fails. The read ordering matters:
 * PSR init depends on edp_dpcd being populated first.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
			     sizeof(intel_dp->edp_dpcd))
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				sink_rates, sizeof(sink_rates));

		/* The rate table is zero-terminated. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	/*
	 * If needed, program our source OUI so we can make various Intel-specific AUX services
	 * available (such as HDR backlight controls)
	 */
	intel_edp_init_source_oui(intel_dp, true);

	return true;
}
4263
4264 static bool
4265 intel_dp_has_sink_count(struct intel_dp *intel_dp)
4266 {
4267         if (!intel_dp->attached_connector)
4268                 return false;
4269
4270         return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
4271                                           intel_dp->dpcd,
4272                                           &intel_dp->desc);
4273 }
4274
/*
 * (Re)read the sink's DPCD on detect/hotplug: LTTPR init, base caps,
 * descriptor and link rates (external DP only — eDP caches these once),
 * sink count, and downstream port info.
 *
 * Returns false if the sink looks absent/unusable (caps read failure,
 * or a dongle reporting SINK_COUNT == 0).
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	int ret;

	intel_dp_lttpr_init(intel_dp);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd))
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (intel_dp_has_sink_count(intel_dp)) {
		ret = drm_dp_read_sink_count(&intel_dp->aux);
		if (ret < 0)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = ret;

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
					   intel_dp->downstream_ports) == 0;
}
4323
4324 static bool
4325 intel_dp_can_mst(struct intel_dp *intel_dp)
4326 {
4327         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4328
4329         return i915->params.enable_dp_mst &&
4330                 intel_dp->can_mst &&
4331                 drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
4332 }
4333
/*
 * Decide whether to run this port in MST mode (port capable, sink
 * capable, and module parameter enabled) and tell the MST topology
 * manager. Logs the decision inputs for debugging.
 */
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
		    encoder->base.base.id, encoder->base.name,
		    yesno(intel_dp->can_mst), yesno(sink_can_mst),
		    yesno(i915->params.enable_dp_mst));

	/* Ports that can't do MST never touch the topology manager. */
	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915->params.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}
4357
4358 static bool
4359 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4360 {
4361         return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4362                                 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4363                 DP_DPRX_ESI_LEN;
4364 }
4365
4366 bool
4367 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
4368                        const struct drm_connector_state *conn_state)
4369 {
4370         /*
4371          * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
4372          * of Color Encoding Format and Content Color Gamut], in order to
4373          * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
4374          */
4375         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
4376                 return true;
4377
4378         switch (conn_state->colorspace) {
4379         case DRM_MODE_COLORIMETRY_SYCC_601:
4380         case DRM_MODE_COLORIMETRY_OPYCC_601:
4381         case DRM_MODE_COLORIMETRY_BT2020_YCC:
4382         case DRM_MODE_COLORIMETRY_BT2020_RGB:
4383         case DRM_MODE_COLORIMETRY_BT2020_CYCC:
4384                 return true;
4385         default:
4386                 break;
4387         }
4388
4389         return false;
4390 }
4391
/*
 * Pack a drm_dp_vsc_sdp into the raw dp_sdp wire format per DP 1.4a
 * spec Table 2-119 (header) and the DB16-DB18 payload layout.
 *
 * Returns the packed length (sizeof(struct dp_sdp)), or -ENOSPC if the
 * destination buffer is too small. Only revision 0x5 SDPs carry the
 * pixel encoding/colorimetry payload; other revisions get header only.
 */
static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
				     struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/*
	 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
	 * VSC SDP Header Bytes
	 */
	sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
	sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
	sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
	sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */

	/*
	 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
	 * per DP 1.4a spec.
	 */
	if (vsc->revision != 0x5)
		goto out;

	/* VSC SDP Payload for DB16 through DB18 */
	/* Pixel Encoding and Colorimetry Formats  */
	sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
	sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */

	/* Component bit depth encoding, DB17[3:0]. */
	switch (vsc->bpc) {
	case 6:
		/* 6bpc: 0x0 */
		break;
	case 8:
		sdp->db[17] = 0x1; /* DB17[3:0] */
		break;
	case 10:
		sdp->db[17] = 0x2;
		break;
	case 12:
		sdp->db[17] = 0x3;
		break;
	case 16:
		sdp->db[17] = 0x4;
		break;
	default:
		MISSING_CASE(vsc->bpc);
		break;
	}
	/* Dynamic Range and Component Bit Depth */
	if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
		sdp->db[17] |= 0x80;  /* DB17[7] */

	/* Content Type */
	sdp->db[18] = vsc->content_type & 0x7;

out:
	return length;
}
4453
/*
 * Pack an HDMI DRM (HDR static metadata) infoframe into a DP SDP per
 * DP 1.4a spec Tables 2-100/2-101: the infoframe is first packed to its
 * CTA-861-G wire format, then its payload is wrapped in a DP SDP header
 * plus two CTA header bytes.
 *
 * Returns the number of bytes the caller must write out, or -ENOSPC on
 * buffer-size or infoframe-packing failure.
 */
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		DRM_DEBUG_KMS("wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 *    [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
	 * will pad rest of the size.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}
4529
4530 static void intel_write_dp_sdp(struct intel_encoder *encoder,
4531                                const struct intel_crtc_state *crtc_state,
4532                                unsigned int type)
4533 {
4534         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4535         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4536         struct dp_sdp sdp = {};
4537         ssize_t len;
4538
4539         if ((crtc_state->infoframes.enable &
4540              intel_hdmi_infoframe_enable(type)) == 0)
4541                 return;
4542
4543         switch (type) {
4544         case DP_SDP_VSC:
4545                 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
4546                                             sizeof(sdp));
4547                 break;
4548         case HDMI_PACKET_TYPE_GAMUT_METADATA:
4549                 len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
4550                                                                &sdp, sizeof(sdp));
4551                 break;
4552         default:
4553                 MISSING_CASE(type);
4554                 return;
4555         }
4556
4557         if (drm_WARN_ON(&dev_priv->drm, len < 0))
4558                 return;
4559
4560         dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
4561 }
4562
4563 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
4564                             const struct intel_crtc_state *crtc_state,
4565                             struct drm_dp_vsc_sdp *vsc)
4566 {
4567         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4568         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4569         struct dp_sdp sdp = {};
4570         ssize_t len;
4571
4572         len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
4573
4574         if (drm_WARN_ON(&dev_priv->drm, len < 0))
4575                 return;
4576
4577         dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
4578                                         &sdp, len);
4579 }
4580
4581 void intel_dp_set_infoframes(struct intel_encoder *encoder,
4582                              bool enable,
4583                              const struct intel_crtc_state *crtc_state,
4584                              const struct drm_connector_state *conn_state)
4585 {
4586         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4587         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4588         i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
4589         u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
4590                          VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
4591                          VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
4592         u32 val = intel_de_read(dev_priv, reg);
4593
4594         /* TODO: Add DSC case (DIP_ENABLE_PPS) */
4595         /* When PSR is enabled, this routine doesn't disable VSC DIP */
4596         if (intel_psr_enabled(intel_dp))
4597                 val &= ~dip_enable;
4598         else
4599                 val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);
4600
4601         if (!enable) {
4602                 intel_de_write(dev_priv, reg, val);
4603                 intel_de_posting_read(dev_priv, reg);
4604                 return;
4605         }
4606
4607         intel_de_write(dev_priv, reg, val);
4608         intel_de_posting_read(dev_priv, reg);
4609
4610         /* When PSR is enabled, VSC SDP is handled by PSR routine */
4611         if (!intel_psr_enabled(intel_dp))
4612                 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
4613
4614         intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
4615 }
4616
4617 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
4618                                    const void *buffer, size_t size)
4619 {
4620         const struct dp_sdp *sdp = buffer;
4621
4622         if (size < sizeof(struct dp_sdp))
4623                 return -EINVAL;
4624
4625         memset(vsc, 0, size);
4626
4627         if (sdp->sdp_header.HB0 != 0)
4628                 return -EINVAL;
4629
4630         if (sdp->sdp_header.HB1 != DP_SDP_VSC)
4631                 return -EINVAL;
4632
4633         vsc->sdp_type = sdp->sdp_header.HB1;
4634         vsc->revision = sdp->sdp_header.HB2;
4635         vsc->length = sdp->sdp_header.HB3;
4636
4637         if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
4638             (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
4639                 /*
4640                  * - HB2 = 0x2, HB3 = 0x8
4641                  *   VSC SDP supporting 3D stereo + PSR
4642                  * - HB2 = 0x4, HB3 = 0xe
4643                  *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
4644                  *   first scan line of the SU region (applies to eDP v1.4b
4645                  *   and higher).
4646                  */
4647                 return 0;
4648         } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
4649                 /*
4650                  * - HB2 = 0x5, HB3 = 0x13
4651                  *   VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
4652                  *   Format.
4653                  */
4654                 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
4655                 vsc->colorimetry = sdp->db[16] & 0xf;
4656                 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;
4657
4658                 switch (sdp->db[17] & 0x7) {
4659                 case 0x0:
4660                         vsc->bpc = 6;
4661                         break;
4662                 case 0x1:
4663                         vsc->bpc = 8;
4664                         break;
4665                 case 0x2:
4666                         vsc->bpc = 10;
4667                         break;
4668                 case 0x3:
4669                         vsc->bpc = 12;
4670                         break;
4671                 case 0x4:
4672                         vsc->bpc = 16;
4673                         break;
4674                 default:
4675                         MISSING_CASE(sdp->db[17] & 0x7);
4676                         return -EINVAL;
4677                 }
4678
4679                 vsc->content_type = sdp->db[18] & 0x7;
4680         } else {
4681                 return -EINVAL;
4682         }
4683
4684         return 0;
4685 }
4686
4687 static int
4688 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
4689                                            const void *buffer, size_t size)
4690 {
4691         int ret;
4692
4693         const struct dp_sdp *sdp = buffer;
4694
4695         if (size < sizeof(struct dp_sdp))
4696                 return -EINVAL;
4697
4698         if (sdp->sdp_header.HB0 != 0)
4699                 return -EINVAL;
4700
4701         if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
4702                 return -EINVAL;
4703
4704         /*
4705          * Least Significant Eight Bits of (Data Byte Count – 1)
4706          * 1Dh (i.e., Data Byte Count = 30 bytes).
4707          */
4708         if (sdp->sdp_header.HB2 != 0x1D)
4709                 return -EINVAL;
4710
4711         /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
4712         if ((sdp->sdp_header.HB3 & 0x3) != 0)
4713                 return -EINVAL;
4714
4715         /* INFOFRAME SDP Version Number */
4716         if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
4717                 return -EINVAL;
4718
4719         /* CTA Header Byte 2 (INFOFRAME Version Number) */
4720         if (sdp->db[0] != 1)
4721                 return -EINVAL;
4722
4723         /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
4724         if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
4725                 return -EINVAL;
4726
4727         ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
4728                                              HDMI_DRM_INFOFRAME_SIZE);
4729
4730         return ret;
4731 }
4732
4733 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
4734                                   struct intel_crtc_state *crtc_state,
4735                                   struct drm_dp_vsc_sdp *vsc)
4736 {
4737         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4738         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4739         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4740         unsigned int type = DP_SDP_VSC;
4741         struct dp_sdp sdp = {};
4742         int ret;
4743
4744         /* When PSR is enabled, VSC SDP is handled by PSR routine */
4745         if (intel_psr_enabled(intel_dp))
4746                 return;
4747
4748         if ((crtc_state->infoframes.enable &
4749              intel_hdmi_infoframe_enable(type)) == 0)
4750                 return;
4751
4752         dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
4753
4754         ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
4755
4756         if (ret)
4757                 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
4758 }
4759
4760 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
4761                                                      struct intel_crtc_state *crtc_state,
4762                                                      struct hdmi_drm_infoframe *drm_infoframe)
4763 {
4764         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4765         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4766         unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
4767         struct dp_sdp sdp = {};
4768         int ret;
4769
4770         if ((crtc_state->infoframes.enable &
4771             intel_hdmi_infoframe_enable(type)) == 0)
4772                 return;
4773
4774         dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
4775                                  sizeof(sdp));
4776
4777         ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
4778                                                          sizeof(sdp));
4779
4780         if (ret)
4781                 drm_dbg_kms(&dev_priv->drm,
4782                             "Failed to unpack DP HDR Metadata Infoframe SDP\n");
4783 }
4784
/*
 * Read back the hardware state of the given SDP @type into @crtc_state.
 * Only DDI encoders are handled; anything else silently returns.
 */
void intel_read_dp_sdp(struct intel_encoder *encoder,
		       struct intel_crtc_state *crtc_state,
		       unsigned int type)
{
	if (encoder->type != INTEL_OUTPUT_DDI)
		return;

	switch (type) {
	case DP_SDP_VSC:
		intel_read_dp_vsc_sdp(encoder, crtc_state,
				      &crtc_state->infoframes.vsc);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
							 &crtc_state->infoframes.drm.drm);
		break;
	default:
		MISSING_CASE(type);
		break;
	}
}
4806
/*
 * Service a DP_TEST_LINK_TRAINING automated test request: read the lane
 * count and link rate requested by the sink, validate them against our
 * source capabilities and stash them in intel_dp->compliance for the
 * subsequent retrain.
 *
 * Returns DP_TEST_ACK on success, DP_TEST_NAK on AUX read failure or
 * unsupported link parameters.
 */
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	/* Convert the DPCD bandwidth code into a link rate value */
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
4844
/*
 * Service a DP_TEST_LINK_VIDEO_PATTERN automated test request: read the
 * requested pattern, resolution and color format from the sink and
 * record them in intel_dp->compliance. Only an RGB (non-CEA range)
 * color-ramp pattern at 6 or 8 bpc is accepted; anything else NAKs.
 */
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	/* Only the color-ramp pattern is supported */
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	/* Width and height are two-byte big-endian DPCD values */
	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}
4906
/*
 * Service a DP_TEST_LINK_EDID_READ automated test request: report back
 * whether the last EDID read succeeded. On failure or corruption the
 * failsafe resolution is selected; on success the checksum of the last
 * EDID block is written to DP_TEST_EDID_CHECKSUM and the preferred
 * resolution is selected.
 */
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			drm_dbg_kms(&i915->drm,
				    "EDID read had %d NACKs, %d DEFERs\n",
				    intel_dp->aux.i2c_nack_count,
				    intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}
4953
/*
 * Program the source-side DDI compliance pattern generator
 * (DDI_DP_COMP_CTL / DDI_DP_COMP_PAT) with the PHY test pattern the
 * sink requested, as stored in compliance.test_data.phytest.
 */
static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
					const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
			to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
	struct drm_dp_phy_test_params *data =
			&intel_dp->compliance.test_data.phytest;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;
	u32 pattern_val;

	switch (data->phy_pattern) {
	case DP_PHY_TEST_PATTERN_NONE:
		DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		break;
	case DP_PHY_TEST_PATTERN_D10_2:
		DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_PHY_TEST_PATTERN_ERROR_COUNT:
		DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_PHY_TEST_PATTERN_PRBS7:
		DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x250. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
		/* 80-bit pattern split across three 32-bit pattern registers */
		pattern_val = 0x3e0f83e0;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_PHY_TEST_PATTERN_CP2520:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x24A. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
		pattern_val = 0xFB;
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
			       pattern_val);
		break;
	default:
		WARN(1, "Invalid Phy Test Pattern\n");
	}
}
5019
/*
 * Temporarily take down the pipe, DDI function and DP transport for PHY
 * compliance testing. Each register is read-modify-written so only the
 * enable (and DDI port-select) bits are cleared, leaving the rest of
 * the configuration intact for intel_dp_autotest_phy_ddi_enable().
 */
static void
intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
				      TGL_TRANS_DDI_PORT_MASK);
	trans_conf_value &= ~PIPECONF_ENABLE;
	dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;

	/* Pipe first, then DDI function, then the DP transport */
	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
}
5046
/*
 * Bring the pipe, DP transport and DDI function back up after a PHY
 * compliance pattern has been programmed, restoring the enable bits
 * cleared by intel_dp_autotest_phy_ddi_disable() and re-selecting the
 * DDI port. Note the write order differs from the disable path: the
 * DDI function is (re)enabled last.
 */
static void
intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = dig_port->base.port;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
				    TGL_TRANS_DDI_SELECT_PORT(port);
	trans_conf_value |= PIPECONF_ENABLE;
	dp_tp_ctl_value |= DP_TP_CTL_ENABLE;

	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
}
5074
/*
 * Apply a sink-requested PHY compliance test pattern: pick up the
 * requested vswing/pre-emphasis from the DPRX link status, cycle the
 * DDI off, program the new signal levels and the source pattern
 * generator, bring the DDI back up, and finally mirror the pattern
 * selection into the sink's DPCD.
 */
static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *crtc_state)
{
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0) {
		DRM_DEBUG_KMS("failed to get link status\n");
		return;
	}

	/* retrieve vswing & pre-emphasis setting */
	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
				  link_status);

	intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);

	intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);

	intel_dp_phy_pattern_update(intel_dp, crtc_state);

	intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);

	drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
				    link_status[DP_DPCD_REV]);
}
5103
5104 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
5105 {
5106         struct drm_dp_phy_test_params *data =
5107                 &intel_dp->compliance.test_data.phytest;
5108
5109         if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
5110                 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
5111                 return DP_TEST_NAK;
5112         }
5113
5114         /* Set test active flag here so userspace doesn't interrupt things */
5115         intel_dp->compliance.test_active = true;
5116
5117         return DP_TEST_ACK;
5118 }
5119
5120 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
5121 {
5122         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5123         u8 response = DP_TEST_NAK;
5124         u8 request = 0;
5125         int status;
5126
5127         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
5128         if (status <= 0) {
5129                 drm_dbg_kms(&i915->drm,
5130                             "Could not read test request from sink\n");
5131                 goto update_status;
5132         }
5133
5134         switch (request) {
5135         case DP_TEST_LINK_TRAINING:
5136                 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
5137                 response = intel_dp_autotest_link_training(intel_dp);
5138                 break;
5139         case DP_TEST_LINK_VIDEO_PATTERN:
5140                 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
5141                 response = intel_dp_autotest_video_pattern(intel_dp);
5142                 break;
5143         case DP_TEST_LINK_EDID_READ:
5144                 drm_dbg_kms(&i915->drm, "EDID test requested\n");
5145                 response = intel_dp_autotest_edid(intel_dp);
5146                 break;
5147         case DP_TEST_LINK_PHY_TEST_PATTERN:
5148                 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
5149                 response = intel_dp_autotest_phy_pattern(intel_dp);
5150                 break;
5151         default:
5152                 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
5153                             request);
5154                 break;
5155         }
5156
5157         if (response & DP_TEST_ACK)
5158                 intel_dp->compliance.test_type = request;
5159
5160 update_status:
5161         status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
5162         if (status <= 0)
5163                 drm_dbg_kms(&i915->drm,
5164                             "Could not write test response to sink\n");
5165 }
5166
5167 static void
5168 intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled)
5169 {
5170                 drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled);
5171
5172                 if (esi[1] & DP_CP_IRQ) {
5173                         intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
5174                         *handled = true;
5175                 }
5176 }
5177
/**
 * intel_dp_check_mst_status - service any pending MST interrupts, check link status
 * @intel_dp: Intel DP struct
 *
 * Read any pending MST interrupts, call MST core to handle these and ack the
 * interrupts. Check if the main and AUX link state is ok.
 *
 * Returns:
 * - %true if pending interrupts were serviced (or no interrupts were
 *   pending) w/o detecting an error condition.
 * - %false if an error condition - like AUX failure or a loss of link - is
 *   detected, which needs servicing from the hotplug work.
 */
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool link_ok = true;

	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);

	/* Loop until there are no more serviced events left to ack */
	for (;;) {
		u8 esi[DP_DPRX_ESI_LEN] = {};
		bool handled;
		int retry;

		if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
			drm_dbg_kms(&i915->drm,
				    "failed to get ESI - device may have failed\n");
			link_ok = false;

			break;
		}

		/* check link status - esi[10] = 0x200c */
		if (intel_dp->active_mst_links > 0 && link_ok &&
		    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "channel EQ not ok, retraining\n");
			link_ok = false;
		}

		drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);

		intel_dp_mst_hpd_irq(intel_dp, esi, &handled);

		/* Nothing was serviced this iteration - all events acked */
		if (!handled)
			break;

		/* Ack the serviced events; retry the 3-byte write up to 3 times */
		for (retry = 0; retry < 3; retry++) {
			int wret;

			wret = drm_dp_dpcd_write(&intel_dp->aux,
						 DP_SINK_COUNT_ESI+1,
						 &esi[1], 3);
			if (wret == 3)
				break;
		}
	}

	return link_ok;
}
5240
5241 static void
5242 intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
5243 {
5244         bool is_active;
5245         u8 buf = 0;
5246
5247         is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
5248         if (intel_dp->frl.is_trained && !is_active) {
5249                 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
5250                         return;
5251
5252                 buf &=  ~DP_PCON_ENABLE_HDMI_LINK;
5253                 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
5254                         return;
5255
5256                 drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);
5257
5258                 /* Restart FRL training or fall back to TMDS mode */
5259                 intel_dp_check_frl_training(intel_dp);
5260         }
5261 }
5262
/*
 * Check whether the main link needs retraining: returns true only when the
 * link was trained, PSR is not in control of it, the cached link params are
 * still valid, and DPRX reports CR/EQ not ok.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
        u8 link_status[DP_LINK_STATUS_SIZE];

        /* Nothing to retrain if the link was never trained to begin with. */
        if (!intel_dp->link_trained)
                return false;

        /*
         * While PSR source HW is enabled, it will control main-link sending
         * frames, enabling and disabling it so trying to do a retrain will fail
         * as the link would or not be on or it could mix training patterns
         * and frame data at the same time causing retrain to fail.
         * Also when exiting PSR, HW will retrain the link anyways fixing
         * any link status error.
         */
        if (intel_psr_enabled(intel_dp))
                return false;

        /* Can't judge link health if the status read itself fails. */
        if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
                                             link_status) < 0)
                return false;

        /*
         * Validate the cached values of intel_dp->link_rate and
         * intel_dp->lane_count before attempting to retrain.
         *
         * FIXME would be nice to user the crtc state here, but since
         * we need to call this from the short HPD handler that seems
         * a bit hard.
         */
        if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
                                        intel_dp->lane_count))
                return false;

        /* Retrain if Channel EQ or CR not ok */
        return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
5301
5302 static bool intel_dp_has_connector(struct intel_dp *intel_dp,
5303                                    const struct drm_connector_state *conn_state)
5304 {
5305         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5306         struct intel_encoder *encoder;
5307         enum pipe pipe;
5308
5309         if (!conn_state->best_encoder)
5310                 return false;
5311
5312         /* SST */
5313         encoder = &dp_to_dig_port(intel_dp)->base;
5314         if (conn_state->best_encoder == &encoder->base)
5315                 return true;
5316
5317         /* MST */
5318         for_each_pipe(i915, pipe) {
5319                 encoder = &intel_dp->mst_encoders[pipe]->base;
5320                 if (conn_state->best_encoder == &encoder->base)
5321                         return true;
5322         }
5323
5324         return false;
5325 }
5326
/*
 * Lock the CRTCs driven by @intel_dp and compute the mask of active CRTCs
 * that need link retraining. Returns 0 on success or a drm_modeset_lock()
 * error (-EDEADLK tells the caller to back off and retry).
 */
static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
                                      struct drm_modeset_acquire_ctx *ctx,
                                      u32 *crtc_mask)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        struct drm_connector_list_iter conn_iter;
        struct intel_connector *connector;
        int ret = 0;

        *crtc_mask = 0;

        if (!intel_dp_needs_link_retrain(intel_dp))
                return 0;

        drm_connector_list_iter_begin(&i915->drm, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                struct drm_connector_state *conn_state =
                        connector->base.state;
                struct intel_crtc_state *crtc_state;
                struct intel_crtc *crtc;

                /* Only consider connectors driven by this port (SST/MST). */
                if (!intel_dp_has_connector(intel_dp, conn_state))
                        continue;

                crtc = to_intel_crtc(conn_state->crtc);
                if (!crtc)
                        continue;

                ret = drm_modeset_lock(&crtc->base.mutex, ctx);
                if (ret)
                        break;

                crtc_state = to_intel_crtc_state(crtc->base.state);

                drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

                if (!crtc_state->hw.active)
                        continue;

                /* Skip CRTCs with a commit still in flight. */
                if (conn_state->commit &&
                    !try_wait_for_completion(&conn_state->commit->hw_done))
                        continue;

                *crtc_mask |= drm_crtc_mask(&crtc->base);
        }
        drm_connector_list_iter_end(&conn_iter);

        /* Re-check: the link may no longer need retraining by now. */
        if (!intel_dp_needs_link_retrain(intel_dp))
                *crtc_mask = 0;

        return ret;
}
5379
5380 static bool intel_dp_is_connected(struct intel_dp *intel_dp)
5381 {
5382         struct intel_connector *connector = intel_dp->attached_connector;
5383
5384         return connector->base.status == connector_status_connected ||
5385                 intel_dp->is_mst;
5386 }
5387
/*
 * Retrain the DP link for @encoder if needed. FIFO underrun reporting is
 * suppressed on all affected CRTCs for the duration of the retrain, and
 * re-enabled only after a vblank has passed. Returns 0 or a
 * drm_modeset_lock() error (-EDEADLK means back off and retry).
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
                          struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct intel_crtc *crtc;
        u32 crtc_mask;
        int ret;

        if (!intel_dp_is_connected(intel_dp))
                return 0;

        ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
                               ctx);
        if (ret)
                return ret;

        ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
        if (ret)
                return ret;

        /* No active CRTC needs retraining. */
        if (crtc_mask == 0)
                return 0;

        drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
                    encoder->base.base.id, encoder->base.name);

        for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
                const struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                /* Suppress underruns caused by re-training */
                intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
                if (crtc_state->has_pch_encoder)
                        intel_set_pch_fifo_underrun_reporting(dev_priv,
                                                              intel_crtc_pch_transcoder(crtc), false);
        }

        for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
                const struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                /* retrain on the MST master transcoder */
                if (INTEL_GEN(dev_priv) >= 12 &&
                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
                    !intel_dp_mst_is_master_trans(crtc_state))
                        continue;

                intel_dp_check_frl_training(intel_dp);
                intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
                intel_dp_start_link_train(intel_dp, crtc_state);
                intel_dp_stop_link_train(intel_dp, crtc_state);
                break;
        }

        for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
                const struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                /* Keep underrun reporting disabled until things are stable */
                intel_wait_for_vblank(dev_priv, crtc->pipe);

                intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
                if (crtc_state->has_pch_encoder)
                        intel_set_pch_fifo_underrun_reporting(dev_priv,
                                                              intel_crtc_pch_transcoder(crtc), true);
        }

        return 0;
}
5458
/*
 * Lock the CRTCs driven by @intel_dp and compute the mask of active CRTCs
 * on which the PHY compliance test should run. Mirrors
 * intel_dp_prep_link_retrain(), but without the retrain-needed checks.
 * Returns 0 or a drm_modeset_lock() error (-EDEADLK means retry).
 */
static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
                                  struct drm_modeset_acquire_ctx *ctx,
                                  u32 *crtc_mask)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        struct drm_connector_list_iter conn_iter;
        struct intel_connector *connector;
        int ret = 0;

        *crtc_mask = 0;

        drm_connector_list_iter_begin(&i915->drm, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                struct drm_connector_state *conn_state =
                        connector->base.state;
                struct intel_crtc_state *crtc_state;
                struct intel_crtc *crtc;

                /* Only consider connectors driven by this port (SST/MST). */
                if (!intel_dp_has_connector(intel_dp, conn_state))
                        continue;

                crtc = to_intel_crtc(conn_state->crtc);
                if (!crtc)
                        continue;

                ret = drm_modeset_lock(&crtc->base.mutex, ctx);
                if (ret)
                        break;

                crtc_state = to_intel_crtc_state(crtc->base.state);

                drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

                if (!crtc_state->hw.active)
                        continue;

                /* Skip CRTCs with a commit still in flight. */
                if (conn_state->commit &&
                    !try_wait_for_completion(&conn_state->commit->hw_done))
                        continue;

                *crtc_mask |= drm_crtc_mask(&crtc->base);
        }
        drm_connector_list_iter_end(&conn_iter);

        return ret;
}
5505
/*
 * Run the requested DP PHY compliance test pattern on the CRTC(s) driven by
 * @encoder. Returns 0 or a drm_modeset_lock() error (-EDEADLK means the
 * caller should back off and retry).
 */
static int intel_dp_do_phy_test(struct intel_encoder *encoder,
                                struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct intel_crtc *crtc;
        u32 crtc_mask;
        int ret;

        ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
                               ctx);
        if (ret)
                return ret;

        ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask);
        if (ret)
                return ret;

        /* No active CRTC to run the test on. */
        if (crtc_mask == 0)
                return 0;

        drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
                    encoder->base.base.id, encoder->base.name);

        for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
                const struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                /* test on the MST master transcoder */
                if (INTEL_GEN(dev_priv) >= 12 &&
                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
                    !intel_dp_mst_is_master_trans(crtc_state))
                        continue;

                intel_dp_process_phy_request(intel_dp, crtc_state);
                break;
        }

        return 0;
}
5546
5547 static void intel_dp_phy_test(struct intel_encoder *encoder)
5548 {
5549         struct drm_modeset_acquire_ctx ctx;
5550         int ret;
5551
5552         drm_modeset_acquire_init(&ctx, 0);
5553
5554         for (;;) {
5555                 ret = intel_dp_do_phy_test(encoder, &ctx);
5556
5557                 if (ret == -EDEADLK) {
5558                         drm_modeset_backoff(&ctx);
5559                         continue;
5560                 }
5561
5562                 break;
5563         }
5564
5565         drm_modeset_drop_locks(&ctx);
5566         drm_modeset_acquire_fini(&ctx);
5567         drm_WARN(encoder->base.dev, ret,
5568                  "Acquiring modeset locks failed with %i\n", ret);
5569 }
5570
5571 /*
5572  * If display is now connected check links status,
5573  * there has been known issues of link loss triggering
5574  * long pulse.
5575  *
5576  * Some sinks (eg. ASUS PB287Q) seem to perform some
5577  * weird HPD ping pong during modesets. So we can apparently
5578  * end up with HPD going low during a modeset, and then
5579  * going back up soon after. And once that happens we must
5580  * retrain the link to get a picture. That's in case no
5581  * userspace component reacted to intermittent HPD dip.
5582  */
/*
 * DP hotplug handler: runs the PHY compliance test if one was requested,
 * otherwise performs normal hotplug detection followed by a link retrain
 * if needed. Returns the resulting hotplug state (possibly RETRY).
 */
static enum intel_hotplug_state
intel_dp_hotplug(struct intel_encoder *encoder,
                 struct intel_connector *connector)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct drm_modeset_acquire_ctx ctx;
        enum intel_hotplug_state state;
        int ret;

        if (intel_dp->compliance.test_active &&
            intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
                intel_dp_phy_test(encoder);
                /* just do the PHY test and nothing else */
                return INTEL_HOTPLUG_UNCHANGED;
        }

        state = intel_encoder_hotplug(encoder, connector);

        drm_modeset_acquire_init(&ctx, 0);

        /* Retrain the link, backing off and retrying on lock contention. */
        for (;;) {
                ret = intel_dp_retrain_link(encoder, &ctx);

                if (ret == -EDEADLK) {
                        drm_modeset_backoff(&ctx);
                        continue;
                }

                break;
        }

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
        drm_WARN(encoder->base.dev, ret,
                 "Acquiring modeset locks failed with %i\n", ret);

        /*
         * Keeping it consistent with intel_ddi_hotplug() and
         * intel_hdmi_hotplug().
         */
        if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
                state = INTEL_HOTPLUG_RETRY;

        return state;
}
5628
/* Read, ack and dispatch the sink's device service IRQ vector. */
static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 val;

        /* The device service IRQ vector requires DPCD rev 1.1+. */
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;

        if (drm_dp_dpcd_readb(&intel_dp->aux,
                              DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
                return;

        /* Ack the pending events by writing the vector back. */
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

        if (val & DP_AUTOMATED_TEST_REQUEST)
                intel_dp_handle_test_request(intel_dp);

        if (val & DP_CP_IRQ)
                intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

        if (val & DP_SINK_SPECIFIC_IRQ)
                drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
5652
/* Read, ack and dispatch the sink's link service IRQ vector (ESI0). */
static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 val;

        /* The link service IRQ vector requires DPCD rev 1.1+. */
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;

        if (drm_dp_dpcd_readb(&intel_dp->aux,
                              DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
                drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
                return;
        }

        /* Ack the pending events by writing the vector back. */
        if (drm_dp_dpcd_writeb(&intel_dp->aux,
                               DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
                drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
                return;
        }

        if (val & HDMI_LINK_STATUS_CHANGED)
                intel_dp_handle_hdmi_link_status_change(intel_dp);
}
5676
5677 /*
5678  * According to DP spec
5679  * 5.1.2:
5680  *  1. Read DPCD
5681  *  2. Configure link according to Receiver Capabilities
5682  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
5683  *  4. Check link status on receipt of hot-plug interrupt
5684  *
5685  * intel_dp_short_pulse -  handles short pulse interrupts
5686  * when full detection is not required.
5687  * Returns %true if short pulse is handled and full detection
5688  * is NOT required and %false otherwise.
5689  */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u8 old_sink_count = intel_dp->sink_count;
        bool ret;

        /*
         * Clearing compliance test variables to allow capturing
         * of values for next automated test request.
         */
        memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

        /*
         * Now read the DPCD to see if it's actually running
         * If the current value of sink count doesn't match with
         * the value that was stored earlier or dpcd read failed
         * we need to do full detection
         */
        ret = intel_dp_get_dpcd(intel_dp);

        if ((old_sink_count != intel_dp->sink_count) || !ret) {
                /* No need to proceed if we are going to do full detect */
                return false;
        }

        /* Service any pending device/link service interrupts. */
        intel_dp_check_device_service_irq(intel_dp);
        intel_dp_check_link_service_irq(intel_dp);

        /* Handle CEC interrupts, if any */
        drm_dp_cec_irq(&intel_dp->aux);

        /* defer to the hotplug work for link retraining if needed */
        if (intel_dp_needs_link_retrain(intel_dp))
                return false;

        intel_psr_short_pulse(intel_dp);

        /* Compliance tests requested via the service IRQ handler above. */
        switch (intel_dp->compliance.test_type) {
        case DP_TEST_LINK_TRAINING:
                drm_dbg_kms(&dev_priv->drm,
                            "Link Training Compliance Test requested\n");
                /* Send a Hotplug Uevent to userspace to start modeset */
                drm_kms_helper_hotplug_event(&dev_priv->drm);
                break;
        case DP_TEST_LINK_PHY_TEST_PATTERN:
                drm_dbg_kms(&dev_priv->drm,
                            "PHY test pattern Compliance Test requested\n");
                /*
                 * Schedule long hpd to do the test
                 *
                 * FIXME get rid of the ad-hoc phy test modeset code
                 * and properly incorporate it into the normal modeset.
                 */
                return false;
        }

        return true;
}
5749
5750 /* XXX this is probably wrong for multiple downstream ports */
5751 static enum drm_connector_status
5752 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
5753 {
5754         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5755         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5756         u8 *dpcd = intel_dp->dpcd;
5757         u8 type;
5758
5759         if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
5760                 return connector_status_connected;
5761
5762         lspcon_resume(dig_port);
5763
5764         if (!intel_dp_get_dpcd(intel_dp))
5765                 return connector_status_disconnected;
5766
5767         /* if there's no downstream port, we're done */
5768         if (!drm_dp_is_branch(dpcd))
5769                 return connector_status_connected;
5770
5771         /* If we're HPD-aware, SINK_COUNT changes dynamically */
5772         if (intel_dp_has_sink_count(intel_dp) &&
5773             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
5774                 return intel_dp->sink_count ?
5775                 connector_status_connected : connector_status_disconnected;
5776         }
5777
5778         if (intel_dp_can_mst(intel_dp))
5779                 return connector_status_connected;
5780
5781         /* If no HPD, poke DDC gently */
5782         if (drm_probe_ddc(&intel_dp->aux.ddc))
5783                 return connector_status_connected;
5784
5785         /* Well we tried, say unknown for unreliable port types */
5786         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
5787                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
5788                 if (type == DP_DS_PORT_TYPE_VGA ||
5789                     type == DP_DS_PORT_TYPE_NON_EDID)
5790                         return connector_status_unknown;
5791         } else {
5792                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
5793                         DP_DWN_STRM_PORT_TYPE_MASK;
5794                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
5795                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
5796                         return connector_status_unknown;
5797         }
5798
5799         /* Anything else is out of spec, warn and ignore */
5800         drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
5801         return connector_status_disconnected;
5802 }
5803
/* eDP panels are reported as always connected; no probing needed. */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
        return connector_status_connected;
}
5809
5810 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
5811 {
5812         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5813         u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
5814
5815         return intel_de_read(dev_priv, SDEISR) & bit;
5816 }
5817
5818 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
5819 {
5820         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5821         u32 bit;
5822
5823         switch (encoder->hpd_pin) {
5824         case HPD_PORT_B:
5825                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
5826                 break;
5827         case HPD_PORT_C:
5828                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
5829                 break;
5830         case HPD_PORT_D:
5831                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
5832                 break;
5833         default:
5834                 MISSING_CASE(encoder->hpd_pin);
5835                 return false;
5836         }
5837
5838         return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
5839 }
5840
5841 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
5842 {
5843         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5844         u32 bit;
5845
5846         switch (encoder->hpd_pin) {
5847         case HPD_PORT_B:
5848                 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
5849                 break;
5850         case HPD_PORT_C:
5851                 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
5852                 break;
5853         case HPD_PORT_D:
5854                 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
5855                 break;
5856         default:
5857                 MISSING_CASE(encoder->hpd_pin);
5858                 return false;
5859         }
5860
5861         return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
5862 }
5863
5864 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
5865 {
5866         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5867         u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
5868
5869         return intel_de_read(dev_priv, DEISR) & bit;
5870 }
5871
5872 /*
5873  * intel_digital_port_connected - is the specified port connected?
5874  * @encoder: intel_encoder
5875  *
5876  * In cases where there's a connector physically connected but it can't be used
5877  * by our hardware we also return false, since the rest of the driver should
5878  * pretty much treat the port as disconnected. This is relevant for type-C
5879  * (starting on ICL) where there's ownership involved.
5880  *
5881  * Return %true if port is connected, %false otherwise.
5882  */
5883 bool intel_digital_port_connected(struct intel_encoder *encoder)
5884 {
5885         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5886         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5887         bool is_connected = false;
5888         intel_wakeref_t wakeref;
5889
5890         with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
5891                 is_connected = dig_port->connected(encoder);
5892
5893         return is_connected;
5894 }
5895
5896 static struct edid *
5897 intel_dp_get_edid(struct intel_dp *intel_dp)
5898 {
5899         struct intel_connector *intel_connector = intel_dp->attached_connector;
5900
5901         /* use cached edid if we have one */
5902         if (intel_connector->edid) {
5903                 /* invalid edid */
5904                 if (IS_ERR(intel_connector->edid))
5905                         return NULL;
5906
5907                 return drm_edid_duplicate(intel_connector->edid);
5908         } else
5909                 return drm_get_edid(&intel_connector->base,
5910                                     &intel_dp->aux.ddc);
5911 }
5912
/*
 * Cache the downstream facing port (DFP) capabilities derived from the
 * branch device's DPCD and the sink's @edid: max bpc, dotclock and TMDS
 * clock limits, PCON FRL bandwidth, and PCON DSC capabilities.
 */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
                    const struct edid *edid)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        struct intel_connector *connector = intel_dp->attached_connector;

        intel_dp->dfp.max_bpc =
                drm_dp_downstream_max_bpc(intel_dp->dpcd,
                                          intel_dp->downstream_ports, edid);

        intel_dp->dfp.max_dotclock =
                drm_dp_downstream_max_dotclock(intel_dp->dpcd,
                                               intel_dp->downstream_ports);

        intel_dp->dfp.min_tmds_clock =
                drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
                                                 intel_dp->downstream_ports,
                                                 edid);
        intel_dp->dfp.max_tmds_clock =
                drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
                                                 intel_dp->downstream_ports,
                                                 edid);

        intel_dp->dfp.pcon_max_frl_bw =
                drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
                                           intel_dp->downstream_ports);

        drm_dbg_kms(&i915->drm,
                    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
                    connector->base.base.id, connector->base.name,
                    intel_dp->dfp.max_bpc,
                    intel_dp->dfp.max_dotclock,
                    intel_dp->dfp.min_tmds_clock,
                    intel_dp->dfp.max_tmds_clock,
                    intel_dp->dfp.pcon_max_frl_bw);

        intel_dp_get_pcon_dsc_cap(intel_dp);
}
5952
5953 static void
5954 intel_dp_update_420(struct intel_dp *intel_dp)
5955 {
5956         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5957         struct intel_connector *connector = intel_dp->attached_connector;
5958         bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;
5959
5960         /* No YCbCr output support on gmch platforms */
5961         if (HAS_GMCH(i915))
5962                 return;
5963
5964         /*
5965          * ILK doesn't seem capable of DP YCbCr output. The
5966          * displayed image is severly corrupted. SNB+ is fine.
5967          */
5968         if (IS_GEN(i915, 5))
5969                 return;
5970
5971         is_branch = drm_dp_is_branch(intel_dp->dpcd);
5972         ycbcr_420_passthrough =
5973                 drm_dp_downstream_420_passthrough(intel_dp->dpcd,
5974                                                   intel_dp->downstream_ports);
5975         /* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
5976         ycbcr_444_to_420 =
5977                 dp_to_dig_port(intel_dp)->lspcon.active ||
5978                 drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
5979                                                         intel_dp->downstream_ports);
5980         rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
5981                                                                  intel_dp->downstream_ports,
5982                                                                  DP_DS_HDMI_BT601_RGB_YCBCR_CONV ||
5983                                                                  DP_DS_HDMI_BT709_RGB_YCBCR_CONV ||
5984                                                                  DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
5985
5986         if (INTEL_GEN(i915) >= 11) {
5987                 /* Let PCON convert from RGB->YCbCr if possible */
5988                 if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
5989                         intel_dp->dfp.rgb_to_ycbcr = true;
5990                         intel_dp->dfp.ycbcr_444_to_420 = true;
5991                         connector->base.ycbcr_420_allowed = true;
5992                 } else {
5993                 /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
5994                         intel_dp->dfp.ycbcr_444_to_420 =
5995                                 ycbcr_444_to_420 && !ycbcr_420_passthrough;
5996
5997                         connector->base.ycbcr_420_allowed =
5998                                 !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
5999                 }
6000         } else {
6001                 /* 4:4:4->4:2:0 conversion is the only way */
6002                 intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;
6003
6004                 connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
6005         }
6006
6007         drm_dbg_kms(&i915->drm,
6008                     "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
6009                     connector->base.base.id, connector->base.name,
6010                     yesno(intel_dp->dfp.rgb_to_ycbcr),
6011                     yesno(connector->base.ycbcr_420_allowed),
6012                     yesno(intel_dp->dfp.ycbcr_444_to_420));
6013 }
6014
/* Fetch the sink's EDID and refresh all state derived from it. */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
        struct intel_connector *connector = intel_dp->attached_connector;
        struct edid *edid;

        /* Drop any state cached from a previously attached sink first. */
        intel_dp_unset_edid(intel_dp);
        edid = intel_dp_get_edid(intel_dp);
        connector->detect_edid = edid;

        intel_dp_update_dfp(intel_dp, edid);
        intel_dp_update_420(intel_dp);

        if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
                intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
                intel_dp->has_audio = drm_detect_monitor_audio(edid);
        }

        /* Let the DP CEC helper know about the new EDID. */
        drm_dp_cec_set_edid(&intel_dp->aux, edid);
        intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}
6036
6037 static void
6038 intel_dp_unset_edid(struct intel_dp *intel_dp)
6039 {
6040         struct intel_connector *connector = intel_dp->attached_connector;
6041
6042         drm_dp_cec_unset_edid(&intel_dp->aux);
6043         kfree(connector->detect_edid);
6044         connector->detect_edid = NULL;
6045
6046         intel_dp->has_hdmi_sink = false;
6047         intel_dp->has_audio = false;
6048         intel_dp->edid_quirks = 0;
6049
6050         intel_dp->dfp.max_bpc = 0;
6051         intel_dp->dfp.max_dotclock = 0;
6052         intel_dp->dfp.min_tmds_clock = 0;
6053         intel_dp->dfp.max_tmds_clock = 0;
6054
6055         intel_dp->dfp.pcon_max_frl_bw = 0;
6056
6057         intel_dp->dfp.ycbcr_444_to_420 = false;
6058         connector->base.ycbcr_420_allowed = false;
6059 }
6060
/*
 * ->detect_ctx hook: determine whether a sink is connected, and refresh
 * all sink-derived state (DPCD, DSC caps, MST, link params, EDID).
 * Returns a connector_status_* value, or a negative error code from
 * link retraining.
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	/* Caller must hold the connection_mutex (probe helper contract). */
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!INTEL_DISPLAY_ENABLED(dev_priv))
		return connector_status_disconnected;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Sink gone: wipe compliance/DSC state and tear down MST. */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		if (intel_dp->is_mst) {
			drm_dbg_kms(&dev_priv->drm,
				    "MST device may have disappeared %d vs %d\n",
				    intel_dp->is_mst,
				    intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (INTEL_GEN(dev_priv) >= 11)
		intel_dp_get_dsc_sink_cap(intel_dp);

	intel_dp_configure_mst(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
	 * supports link training fallback params.
	 */
	if (intel_dp->reset_link_params || intel_dp->is_mst) {
		/* Initial max link lane count */
		intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

		/* Initial max link rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	/* eDP counts as connected even without an EDID. */
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_device_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	/*
	 * Make sure the refs for power wells enabled during detect are
	 * dropped to avoid a new detect cycle triggered by HPD polling.
	 */
	intel_display_power_flush_work(dev_priv);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}
6181
6182 static void
6183 intel_dp_force(struct drm_connector *connector)
6184 {
6185         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
6186         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
6187         struct intel_encoder *intel_encoder = &dig_port->base;
6188         struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
6189         enum intel_display_power_domain aux_domain =
6190                 intel_aux_power_domain(dig_port);
6191         intel_wakeref_t wakeref;
6192
6193         drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
6194                     connector->base.id, connector->name);
6195         intel_dp_unset_edid(intel_dp);
6196
6197         if (connector->status != connector_status_connected)
6198                 return;
6199
6200         wakeref = intel_display_power_get(dev_priv, aux_domain);
6201
6202         intel_dp_set_edid(intel_dp);
6203
6204         intel_display_power_put(dev_priv, aux_domain, wakeref);
6205 }
6206
6207 static int intel_dp_get_modes(struct drm_connector *connector)
6208 {
6209         struct intel_connector *intel_connector = to_intel_connector(connector);
6210         struct edid *edid;
6211
6212         edid = intel_connector->detect_edid;
6213         if (edid) {
6214                 int ret = intel_connector_update_modes(connector, edid);
6215                 if (ret)
6216                         return ret;
6217         }
6218
6219         /* if eDP has no EDID, fall back to fixed mode */
6220         if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
6221             intel_connector->panel.fixed_mode) {
6222                 struct drm_display_mode *mode;
6223
6224                 mode = drm_mode_duplicate(connector->dev,
6225                                           intel_connector->panel.fixed_mode);
6226                 if (mode) {
6227                         drm_mode_probed_add(connector, mode);
6228                         return 1;
6229                 }
6230         }
6231
6232         if (!edid) {
6233                 struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
6234                 struct drm_display_mode *mode;
6235
6236                 mode = drm_dp_downstream_mode(connector->dev,
6237                                               intel_dp->dpcd,
6238                                               intel_dp->downstream_ports);
6239                 if (mode) {
6240                         drm_mode_probed_add(connector, mode);
6241                         return 1;
6242                 }
6243         }
6244
6245         return 0;
6246 }
6247
/*
 * ->late_register hook: register the AUX channel and CEC adapter for this
 * connector, and set up LSPCON (including its HDR property) when the VBT
 * says one is present on the port.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_lspcon *lspcon = &dig_port->lspcon;
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	/* The AUX device is parented under the connector's kdev. */
	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);

	if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
		return ret;

	/*
	 * ToDo: Clean this up to handle lspcon init and resume more
	 * efficiently and streamlined.
	 */
	if (lspcon_init(dig_port)) {
		lspcon_detect_hdr_capability(lspcon);
		if (lspcon->hdr_supported)
			drm_object_attach_property(&connector->base,
						   connector->dev->mode_config.hdr_output_metadata_property,
						   0);
	}

	return ret;
}
6286
/*
 * ->early_unregister hook: tear down the CEC adapter and AUX channel in
 * reverse order of intel_dp_connector_register().
 */
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}
6296
/*
 * Flush/teardown work common to encoder destroy paths: clean up MST,
 * synchronously turn off panel VDD, and release the AUX channel.
 */
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);

	/* Make sure any delayed VDD-off work has completed. */
	intel_pps_vdd_off_sync(intel_dp);

	intel_dp_aux_fini(intel_dp);
}
6308
/* ->destroy hook: flush pending work, then free the digital port. */
static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	intel_dp_encoder_flush_work(encoder);

	drm_encoder_cleanup(encoder);
	kfree(enc_to_dig_port(to_intel_encoder(encoder)));
}
6316
/* Suspend: synchronously turn off panel VDD before the system sleeps. */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_vdd_off_sync(intel_dp);
}
6323
/* Shutdown: wait out the panel power cycle delay before powering off. */
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_wait_power_cycle(intel_dp);
}
6330
6331 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
6332 {
6333         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6334         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
6335         enum pipe pipe;
6336
6337         if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
6338                                   encoder->port, &pipe))
6339                 return pipe;
6340
6341         return INVALID_PIPE;
6342 }
6343
/*
 * ->reset hook: re-sync software state with the hardware after a GPU
 * reset or resume: cached port register value, link params, the active
 * pipe on VLV/CHV, and the panel power sequencer.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));

	/* On DDI platforms the port register is managed elsewhere. */
	if (!HAS_DDI(dev_priv))
		intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);

	/* Force a fresh link param negotiation on the next detect. */
	intel_dp->reset_link_params = true;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t wakeref;

		with_intel_pps_lock(intel_dp, wakeref)
			intel_dp->active_pipe = vlv_active_pipe(intel_dp);
	}

	intel_pps_encoder_reset(intel_dp);
}
6363
/*
 * Add every connector belonging to the given tile group (and its crtc's
 * planes) to the atomic state, forcing a modeset on each, so tiled
 * displays are modeset together. Returns 0 or a negative error code.
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		/* Connector may be in the group but not currently driven. */
		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
6405
6406 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
6407 {
6408         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6409         struct intel_crtc *crtc;
6410
6411         if (transcoders == 0)
6412                 return 0;
6413
6414         for_each_intel_crtc(&dev_priv->drm, crtc) {
6415                 struct intel_crtc_state *crtc_state;
6416                 int ret;
6417
6418                 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
6419                 if (IS_ERR(crtc_state))
6420                         return PTR_ERR(crtc_state);
6421
6422                 if (!crtc_state->hw.enable)
6423                         continue;
6424
6425                 if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
6426                         continue;
6427
6428                 crtc_state->uapi.mode_changed = true;
6429
6430                 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
6431                 if (ret)
6432                         return ret;
6433
6434                 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
6435                 if (ret)
6436                         return ret;
6437
6438                 transcoders &= ~BIT(crtc_state->cpu_transcoder);
6439         }
6440
6441         drm_WARN_ON(&dev_priv->drm, transcoders != 0);
6442
6443         return 0;
6444 }
6445
6446 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
6447                                       struct drm_connector *connector)
6448 {
6449         const struct drm_connector_state *old_conn_state =
6450                 drm_atomic_get_old_connector_state(&state->base, connector);
6451         const struct intel_crtc_state *old_crtc_state;
6452         struct intel_crtc *crtc;
6453         u8 transcoders;
6454
6455         crtc = to_intel_crtc(old_conn_state->crtc);
6456         if (!crtc)
6457                 return 0;
6458
6459         old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
6460
6461         if (!old_crtc_state->hw.active)
6462                 return 0;
6463
6464         transcoders = old_crtc_state->sync_mode_slaves_mask;
6465         if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
6466                 transcoders |= BIT(old_crtc_state->master_transcoder);
6467
6468         return intel_modeset_affected_transcoders(state,
6469                                                   transcoders);
6470 }
6471
6472 static int intel_dp_connector_atomic_check(struct drm_connector *conn,
6473                                            struct drm_atomic_state *_state)
6474 {
6475         struct drm_i915_private *dev_priv = to_i915(conn->dev);
6476         struct intel_atomic_state *state = to_intel_atomic_state(_state);
6477         int ret;
6478
6479         ret = intel_digital_connector_atomic_check(conn, &state->base);
6480         if (ret)
6481                 return ret;
6482
6483         /*
6484          * We don't enable port sync on BDW due to missing w/as and
6485          * due to not having adjusted the modeset sequence appropriately.
6486          */
6487         if (INTEL_GEN(dev_priv) < 9)
6488                 return 0;
6489
6490         if (!intel_connector_needs_modeset(state, conn))
6491                 return 0;
6492
6493         if (conn->has_tile) {
6494                 ret = intel_modeset_tile_group(state, conn->tile_group->id);
6495                 if (ret)
6496                         return ret;
6497         }
6498
6499         return intel_modeset_synced_crtcs(state, conn);
6500 }
6501
/*
 * DP connector funcs. Detection is handled via ->detect_ctx in
 * intel_dp_connector_helper_funcs rather than ->detect here.
 */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
6513
/* Probe-helper hooks for the DP connector. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};
6520
/* DP encoder funcs: reset on GPU reset/resume, destroy on teardown. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
6525
/*
 * Handle an HPD pulse on a DP digital port. Returns IRQ_HANDLED when the
 * pulse was fully serviced (or deliberately ignored), IRQ_NONE when the
 * caller should schedule full connector reprobing.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd || !intel_pps_have_power(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(&i915->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ? "long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ? "long" : "short");

	/* Long pulse: treat as (un)plug and let a full detect handle it. */
	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	/* Short pulse: service the sink IRQ; escalate to detect on failure. */
	if (intel_dp->is_mst) {
		if (!intel_dp_check_mst_status(intel_dp))
			return IRQ_NONE;
	} else if (!intel_dp_short_pulse(intel_dp)) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
6567
6568 /* check the VBT to see whether the eDP is on another port */
6569 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
6570 {
6571         /*
6572          * eDP not supported on g4x. so bail out early just
6573          * for a bit extra safety in case the VBT is bonkers.
6574          */
6575         if (INTEL_GEN(dev_priv) < 5)
6576                 return false;
6577
6578         if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
6579                 return true;
6580
6581         return intel_bios_is_port_edp(dev_priv, port);
6582 }
6583
/*
 * Attach the drm properties supported by this DP connector, depending on
 * platform, port and whether the panel is eDP or driven through LSPCON.
 */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!intel_dp_is_edp(intel_dp))
		drm_connector_attach_dp_subconnector_property(connector);

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	/* GMCH platforms top out at 10 bpc, gen5+ at 12 bpc. */
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (INTEL_GEN(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	/* Register HDMI colorspace for case of lspcon */
	if (intel_bios_is_lspcon_present(dev_priv, port)) {
		drm_connector_attach_content_type_property(connector);
		intel_attach_hdmi_colorspace_property(connector);
	} else {
		intel_attach_dp_colorspace_property(connector);
	}

	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		drm_object_attach_property(&connector->base,
					   connector->dev->mode_config.hdr_output_metadata_property,
					   0);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;

	}
}
6628
6629 /**
6630  * intel_dp_set_drrs_state - program registers for RR switch to take effect
6631  * @dev_priv: i915 device
6632  * @crtc_state: a pointer to the active intel_crtc_state
6633  * @refresh_rate: RR to be programmed
6634  *
6635  * This function gets called when refresh rate (RR) has to be changed from
6636  * one frequency to another. Switches can be between high and low RR
6637  * supported by the panel or to any other RR based on media playback (in
6638  * this case, RR value needs to be passed from user space).
6639  *
6640  * The caller of this function needs to take a lock on dev_priv->drrs.
6641  */
6642 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
6643                                     const struct intel_crtc_state *crtc_state,
6644                                     int refresh_rate)
6645 {
6646         struct intel_dp *intel_dp = dev_priv->drrs.dp;
6647         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
6648         enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
6649
6650         if (refresh_rate <= 0) {
6651                 drm_dbg_kms(&dev_priv->drm,
6652                             "Refresh rate should be positive non-zero.\n");
6653                 return;
6654         }
6655
6656         if (intel_dp == NULL) {
6657                 drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
6658                 return;
6659         }
6660
6661         if (!intel_crtc) {
6662                 drm_dbg_kms(&dev_priv->drm,
6663                             "DRRS: intel_crtc not initialized\n");
6664                 return;
6665         }
6666
6667         if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
6668                 drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
6669                 return;
6670         }
6671
6672         if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
6673                         refresh_rate)
6674                 index = DRRS_LOW_RR;
6675
6676         if (index == dev_priv->drrs.refresh_rate_type) {
6677                 drm_dbg_kms(&dev_priv->drm,
6678                             "DRRS requested for previously set RR...ignoring\n");
6679                 return;
6680         }
6681
6682         if (!crtc_state->hw.active) {
6683                 drm_dbg_kms(&dev_priv->drm,
6684                             "eDP encoder disabled. CRTC not Active\n");
6685                 return;
6686         }
6687
6688         if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
6689                 switch (index) {
6690                 case DRRS_HIGH_RR:
6691                         intel_dp_set_m_n(crtc_state, M1_N1);
6692                         break;
6693                 case DRRS_LOW_RR:
6694                         intel_dp_set_m_n(crtc_state, M2_N2);
6695                         break;
6696                 case DRRS_MAX_RR:
6697                 default:
6698                         drm_err(&dev_priv->drm,
6699                                 "Unsupported refreshrate type\n");
6700                 }
6701         } else if (INTEL_GEN(dev_priv) > 6) {
6702                 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
6703                 u32 val;
6704
6705                 val = intel_de_read(dev_priv, reg);
6706                 if (index > DRRS_HIGH_RR) {
6707                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6708                                 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6709                         else
6710                                 val |= PIPECONF_EDP_RR_MODE_SWITCH;
6711                 } else {
6712                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6713                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6714                         else
6715                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
6716                 }
6717                 intel_de_write(dev_priv, reg, val);
6718         }
6719
6720         dev_priv->drrs.refresh_rate_type = index;
6721
6722         drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
6723                     refresh_rate);
6724 }
6725
/* Mark DRRS as enabled for this DP. Called with drrs.mutex held. */
static void
intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	dev_priv->drrs.busy_frontbuffer_bits = 0;
	dev_priv->drrs.dp = intel_dp;
}
6734
6735 /**
6736  * intel_edp_drrs_enable - init drrs struct if supported
6737  * @intel_dp: DP struct
6738  * @crtc_state: A pointer to the active crtc state.
6739  *
6740  * Initializes frontbuffer_bits and drrs.dp
6741  */
6742 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
6743                            const struct intel_crtc_state *crtc_state)
6744 {
6745         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6746
6747         if (!crtc_state->has_drrs)
6748                 return;
6749
6750         drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");
6751
6752         mutex_lock(&dev_priv->drrs.mutex);
6753
6754         if (dev_priv->drrs.dp) {
6755                 drm_warn(&dev_priv->drm, "DRRS already enabled\n");
6756                 goto unlock;
6757         }
6758
6759         intel_edp_drrs_enable_locked(intel_dp);
6760
6761 unlock:
6762         mutex_unlock(&dev_priv->drrs.mutex);
6763 }
6764
/*
 * Mark DRRS as disabled, restoring the high (fixed-mode) refresh rate if
 * we were in low RR. Called with drrs.mutex held.
 */
static void
intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
		int refresh;

		refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
		intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
	}

	dev_priv->drrs.dp = NULL;
}
6780
6781 /**
6782  * intel_edp_drrs_disable - Disable DRRS
6783  * @intel_dp: DP struct
6784  * @old_crtc_state: Pointer to old crtc_state.
6785  *
6786  */
6787 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
6788                             const struct intel_crtc_state *old_crtc_state)
6789 {
6790         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6791
6792         if (!old_crtc_state->has_drrs)
6793                 return;
6794
6795         mutex_lock(&dev_priv->drrs.mutex);
6796         if (!dev_priv->drrs.dp) {
6797                 mutex_unlock(&dev_priv->drrs.mutex);
6798                 return;
6799         }
6800
6801         intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
6802         mutex_unlock(&dev_priv->drrs.mutex);
6803
6804         cancel_delayed_work_sync(&dev_priv->drrs.work);
6805 }
6806
6807 /**
6808  * intel_edp_drrs_update - Update DRRS state
6809  * @intel_dp: Intel DP
6810  * @crtc_state: new CRTC state
6811  *
6812  * This function will update DRRS states, disabling or enabling DRRS when
6813  * executing fastsets. For full modeset, intel_edp_drrs_disable() and
6814  * intel_edp_drrs_enable() should be called instead.
6815  */
6816 void
6817 intel_edp_drrs_update(struct intel_dp *intel_dp,
6818                       const struct intel_crtc_state *crtc_state)
6819 {
6820         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6821
6822         if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
6823                 return;
6824
6825         mutex_lock(&dev_priv->drrs.mutex);
6826
6827         /* New state matches current one? */
6828         if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
6829                 goto unlock;
6830
6831         if (crtc_state->has_drrs)
6832                 intel_edp_drrs_enable_locked(intel_dp);
6833         else
6834                 intel_edp_drrs_disable_locked(intel_dp, crtc_state);
6835
6836 unlock:
6837         mutex_unlock(&dev_priv->drrs.mutex);
6838 }
6839
6840 static void intel_edp_drrs_downclock_work(struct work_struct *work)
6841 {
6842         struct drm_i915_private *dev_priv =
6843                 container_of(work, typeof(*dev_priv), drrs.work.work);
6844         struct intel_dp *intel_dp;
6845
6846         mutex_lock(&dev_priv->drrs.mutex);
6847
6848         intel_dp = dev_priv->drrs.dp;
6849
6850         if (!intel_dp)
6851                 goto unlock;
6852
6853         /*
6854          * The delayed work can race with an invalidate hence we need to
6855          * recheck.
6856          */
6857
6858         if (dev_priv->drrs.busy_frontbuffer_bits)
6859                 goto unlock;
6860
6861         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
6862                 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
6863
6864                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6865                         drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
6866         }
6867
6868 unlock:
6869         mutex_unlock(&dev_priv->drrs.mutex);
6870 }
6871
6872 /**
6873  * intel_edp_drrs_invalidate - Disable Idleness DRRS
6874  * @dev_priv: i915 device
6875  * @frontbuffer_bits: frontbuffer plane tracking bits
6876  *
 * This function gets called every time rendering on the given planes starts.
6878  * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
6879  *
6880  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6881  */
6882 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
6883                                unsigned int frontbuffer_bits)
6884 {
6885         struct intel_dp *intel_dp;
6886         struct drm_crtc *crtc;
6887         enum pipe pipe;
6888
6889         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6890                 return;
6891
6892         cancel_delayed_work(&dev_priv->drrs.work);
6893
6894         mutex_lock(&dev_priv->drrs.mutex);
6895
6896         intel_dp = dev_priv->drrs.dp;
6897         if (!intel_dp) {
6898                 mutex_unlock(&dev_priv->drrs.mutex);
6899                 return;
6900         }
6901
6902         crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
6903         pipe = to_intel_crtc(crtc)->pipe;
6904
6905         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6906         dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
6907
6908         /* invalidate means busy screen hence upclock */
6909         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6910                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6911                                         drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
6912
6913         mutex_unlock(&dev_priv->drrs.mutex);
6914 }
6915
6916 /**
6917  * intel_edp_drrs_flush - Restart Idleness DRRS
6918  * @dev_priv: i915 device
6919  * @frontbuffer_bits: frontbuffer plane tracking bits
6920  *
6921  * This function gets called every time rendering on the given planes has
6922  * completed or flip on a crtc is completed. So DRRS should be upclocked
6923  * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
6924  * if no other planes are dirty.
6925  *
6926  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6927  */
6928 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
6929                           unsigned int frontbuffer_bits)
6930 {
6931         struct intel_dp *intel_dp;
6932         struct drm_crtc *crtc;
6933         enum pipe pipe;
6934
6935         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6936                 return;
6937
6938         cancel_delayed_work(&dev_priv->drrs.work);
6939
6940         mutex_lock(&dev_priv->drrs.mutex);
6941
6942         intel_dp = dev_priv->drrs.dp;
6943         if (!intel_dp) {
6944                 mutex_unlock(&dev_priv->drrs.mutex);
6945                 return;
6946         }
6947
6948         crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
6949         pipe = to_intel_crtc(crtc)->pipe;
6950
6951         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6952         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
6953
6954         /* flush means busy screen hence upclock */
6955         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6956                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6957                                         drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
6958
6959         /*
6960          * flush also means no more activity hence schedule downclock, if all
6961          * other fbs are quiescent too
6962          */
6963         if (!dev_priv->drrs.busy_frontbuffer_bits)
6964                 schedule_delayed_work(&dev_priv->drrs.work,
6965                                 msecs_to_jiffies(1000));
6966         mutex_unlock(&dev_priv->drrs.mutex);
6967 }
6968
6969 /**
6970  * DOC: Display Refresh Rate Switching (DRRS)
6971  *
6972  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
6974  * dynamically, based on the usage scenario. This feature is applicable
6975  * for internal panels.
6976  *
6977  * Indication that the panel supports DRRS is given by the panel EDID, which
6978  * would list multiple refresh rates for one resolution.
6979  *
6980  * DRRS is of 2 types - static and seamless.
6981  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
6982  * (may appear as a blink on screen) and is used in dock-undock scenario.
6983  * Seamless DRRS involves changing RR without any visual effect to the user
6984  * and can be used during normal system usage. This is done by programming
6985  * certain registers.
6986  *
6987  * Support for static/seamless DRRS may be indicated in the VBT based on
6988  * inputs from the panel spec.
6989  *
6990  * DRRS saves power by switching to low RR based on usage scenarios.
6991  *
6992  * The implementation is based on frontbuffer tracking implementation.  When
6993  * there is a disturbance on the screen triggered by user activity or a periodic
6994  * system activity, DRRS is disabled (RR is changed to high RR).  When there is
6995  * no movement on screen, after a timeout of 1 second, a switch to low RR is
6996  * made.
6997  *
6998  * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
6999  * and intel_edp_drrs_flush() are called.
7000  *
7001  * DRRS can be further extended to support other internal panels and also
7002  * the scenario of video playback wherein RR is set based on the rate
7003  * requested by userspace.
7004  */
7005
7006 /**
7007  * intel_dp_drrs_init - Init basic DRRS work and mutex.
7008  * @connector: eDP connector
7009  * @fixed_mode: preferred mode of panel
7010  *
 * This function is called only once at driver load to initialize basic
7012  * DRRS stuff.
7013  *
7014  * Returns:
7015  * Downclock mode if panel supports it, else return NULL.
7016  * DRRS support is determined by the presence of downclock mode (apart
7017  * from VBT setting).
7018  */
7019 static struct drm_display_mode *
7020 intel_dp_drrs_init(struct intel_connector *connector,
7021                    struct drm_display_mode *fixed_mode)
7022 {
7023         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
7024         struct drm_display_mode *downclock_mode = NULL;
7025
7026         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
7027         mutex_init(&dev_priv->drrs.mutex);
7028
7029         if (INTEL_GEN(dev_priv) <= 6) {
7030                 drm_dbg_kms(&dev_priv->drm,
7031                             "DRRS supported for Gen7 and above\n");
7032                 return NULL;
7033         }
7034
7035         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
7036                 drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
7037                 return NULL;
7038         }
7039
7040         downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
7041         if (!downclock_mode) {
7042                 drm_dbg_kms(&dev_priv->drm,
7043                             "Downclock mode is not found. DRRS not supported\n");
7044                 return NULL;
7045         }
7046
7047         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
7048
7049         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
7050         drm_dbg_kms(&dev_priv->drm,
7051                     "seamless DRRS supported for eDP panel.\n");
7052         return downclock_mode;
7053 }
7054
/*
 * Finish connector setup for eDP: bring up the panel power sequencer, cache
 * DPCD and EDID, determine the fixed (and optional DRRS downclock) panel
 * modes, and initialize panel/backlight state.
 *
 * Returns true on success (or trivially for non-eDP), false when the port
 * should not be registered as eDP (LVDS conflict or unreadable DPCD).
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	struct edid *edid;

	/* Nothing eDP-specific to do for external DP. */
	if (!intel_dp_is_edp(intel_dp))
		return true;

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(dev,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	intel_pps_init(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "failed to retrieve link info, disabling eDP\n");
		/* intel_edp_init_dpcd() left VDD on; undo that on bail-out. */
		goto out_vdd_off;
	}

	/* mode_config.mutex protects the connector mode lists we fill in. */
	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector, edid);
			intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
		} else {
			/* EDID read but yielded no modes: record the error. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* Cached as either a valid pointer or an ERR_PTR, never NULL. */
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		drm_dbg_kms(&dev_priv->drm,
			    "using pipe %c for initial backlight setup\n",
			    pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	/* Backlight power is sequenced through the PPS on eDP. */
	intel_connector->panel.backlight.power = intel_pps_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode) {
		drm_connector_set_panel_orientation_with_quirk(connector,
				dev_priv->vbt.orientation,
				fixed_mode->hdisplay, fixed_mode->vdisplay);
	}

	return true;

out_vdd_off:
	intel_pps_vdd_off_sync(intel_dp);

	return false;
}
7157
7158 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
7159 {
7160         struct intel_connector *intel_connector;
7161         struct drm_connector *connector;
7162
7163         intel_connector = container_of(work, typeof(*intel_connector),
7164                                        modeset_retry_work);
7165         connector = &intel_connector->base;
7166         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
7167                       connector->name);
7168
7169         /* Grab the locks before changing connector property*/
7170         mutex_lock(&connector->dev->mode_config.mutex);
7171         /* Set connector link status to BAD and send a Uevent to notify
7172          * userspace to do a modeset.
7173          */
7174         drm_connector_set_link_status_property(connector,
7175                                                DRM_MODE_LINK_STATUS_BAD);
7176         mutex_unlock(&connector->dev->mode_config.mutex);
7177         /* Send Hotplug uevent so userspace can reprobe */
7178         drm_kms_helper_hotplug_event(connector->dev);
7179 }
7180
/*
 * Create and register the DRM connector for a DP/eDP digital port: validate
 * the port, snapshot hardware state, pick the connector type, set up AUX,
 * MST, and (for eDP) panel/backlight state, then attach properties and HDCP.
 *
 * Returns true on success; false on any failure, after cleaning up the
 * partially-initialized connector.
 */
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	/* On vlv/chv record which pipe (if any) is currently driving us. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
			      IS_CHERRYVIEW(dev_priv)) &&
			intel_dp_is_edp(intel_dp) &&
			port != PORT_B && port != PORT_C))
		return false;

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		/* Unwind AUX and MST before tearing down the connector. */
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* HDCP is only wired up for external DP here, not for eDP panels. */
	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_init_hdcp(dig_port, intel_connector);
		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
		intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
			       (temp & ~0xf) | 0xd);
	}

	/* Start with no HDMI FRL training state. */
	intel_dp->frl.is_trained = false;
	intel_dp->frl.trained_rate_gbps = 0;

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}
7305
/*
 * Allocate and initialize a DP digital port/encoder pair for @port backed by
 * @output_reg, wiring up the per-platform (chv/vlv/g4x/cpt) modeset, link
 * training, and signal-level hooks before registering the connector.
 *
 * Returns true on success; false on any allocation or init failure, with all
 * partially-created objects freed.
 */
bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
	if (!dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &dig_port->base;
	encoder = &intel_encoder->base;

	mutex_init(&dig_port->hdcp_mutex);

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	/* Common encoder hooks, shared by all platforms. */
	intel_encoder->hotplug = intel_dp_hotplug;
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->sync_state = intel_dp_sync_state;
	intel_encoder->initial_fastset_check = intel_dp_initial_fastset_check;
	intel_encoder->update_pipe = intel_panel_update_backlight;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	intel_encoder->shutdown = intel_dp_encoder_shutdown;
	/* Per-platform enable/disable sequences. */
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	/* Link-training register layout differs on CPT PCH / IVB port A. */
	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A))
		dig_port->dp.set_link_train = cpt_set_link_train;
	else
		dig_port->dp.set_link_train = g4x_set_link_train;

	/* Voltage-swing/pre-emphasis programming is platform specific. */
	if (IS_CHERRYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = chv_set_signal_levels;
	else if (IS_VALLEYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = vlv_set_signal_levels;
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
	else if (IS_GEN(dev_priv, 6) && port == PORT_A)
		dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
	else
		dig_port->dp.set_signal_levels = g4x_set_signal_levels;

	/* Maximum supported swing/pre-emphasis levels for link training. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
	    (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
		dig_port->dp.preemph_max = intel_dp_preemph_max_3;
		dig_port->dp.voltage_max = intel_dp_voltage_max_3;
	} else {
		dig_port->dp.preemph_max = intel_dp_preemph_max_2;
		dig_port->dp.voltage_max = intel_dp_voltage_max_2;
	}

	dig_port->dp.output_reg = output_reg;
	dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	/* chv port D can only drive pipe C; other ports pipes A/B. */
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->pipe_mask = BIT(PIPE_C);
		else
			intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
	} else {
		intel_encoder->pipe_mask = ~0;
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;
	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	dig_port->hpd_pulse = intel_dp_hpd_pulse;

	/* Live-status detection differs between GMCH and PCH platforms. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_GM45(dev_priv))
			dig_port->connected = gm45_digital_port_connected;
		else
			dig_port->connected = g4x_digital_port_connected;
	} else {
		if (port == PORT_A)
			dig_port->connected = ilk_digital_port_connected;
		else
			dig_port->connected = ibx_digital_port_connected;
	}

	/* Port A (CPU eDP) carries no infoframes. */
	if (port != PORT_A)
		intel_infoframe_init(dig_port);

	dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(dig_port);
	return false;
}
7436
7437 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
7438 {
7439         struct intel_encoder *encoder;
7440
7441         for_each_intel_encoder(&dev_priv->drm, encoder) {
7442                 struct intel_dp *intel_dp;
7443
7444                 if (encoder->type != INTEL_OUTPUT_DDI)
7445                         continue;
7446
7447                 intel_dp = enc_to_intel_dp(encoder);
7448
7449                 if (!intel_dp->can_mst)
7450                         continue;
7451
7452                 if (intel_dp->is_mst)
7453                         drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
7454         }
7455 }
7456
7457 void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
7458 {
7459         struct intel_encoder *encoder;
7460
7461         for_each_intel_encoder(&dev_priv->drm, encoder) {
7462                 struct intel_dp *intel_dp;
7463                 int ret;
7464
7465                 if (encoder->type != INTEL_OUTPUT_DDI)
7466                         continue;
7467
7468                 intel_dp = enc_to_intel_dp(encoder);
7469
7470                 if (!intel_dp->can_mst)
7471                         continue;
7472
7473                 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
7474                                                      true);
7475                 if (ret) {
7476                         intel_dp->is_mst = false;
7477                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
7478                                                         false);
7479                 }
7480         }
7481 }