drm/i915: Pass atomic state to encoder hooks
[sfrench/cifs-2.6.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
34
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_fourcc.h>
41 #include <drm/drm_plane_helper.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/drm_rect.h>
44
45 #include "display/intel_crt.h"
46 #include "display/intel_ddi.h"
47 #include "display/intel_dp.h"
48 #include "display/intel_dp_mst.h"
49 #include "display/intel_dsi.h"
50 #include "display/intel_dvo.h"
51 #include "display/intel_gmbus.h"
52 #include "display/intel_hdmi.h"
53 #include "display/intel_lvds.h"
54 #include "display/intel_sdvo.h"
55 #include "display/intel_tv.h"
56 #include "display/intel_vdsc.h"
57
58 #include "gt/intel_rps.h"
59
60 #include "i915_drv.h"
61 #include "i915_trace.h"
62 #include "intel_acpi.h"
63 #include "intel_atomic.h"
64 #include "intel_atomic_plane.h"
65 #include "intel_bw.h"
66 #include "intel_cdclk.h"
67 #include "intel_color.h"
68 #include "intel_display_types.h"
69 #include "intel_dp_link_training.h"
70 #include "intel_fbc.h"
71 #include "intel_fbdev.h"
72 #include "intel_fifo_underrun.h"
73 #include "intel_frontbuffer.h"
74 #include "intel_hdcp.h"
75 #include "intel_hotplug.h"
76 #include "intel_overlay.h"
77 #include "intel_pipe_crc.h"
78 #include "intel_pm.h"
79 #include "intel_psr.h"
80 #include "intel_quirks.h"
81 #include "intel_sideband.h"
82 #include "intel_sprite.h"
83 #include "intel_tc.h"
84 #include "intel_vga.h"
85
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Primary plane formats for vlv/chv (adds per-pixel alpha variants) */
static const u32 vlv_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Modifiers supported by i9xx planes; list terminated by DRM_FORMAT_MOD_INVALID */
static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Cursor planes only support linear; list terminated by DRM_FORMAT_MOD_INVALID */
static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
145
146 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
147                                 struct intel_crtc_state *pipe_config);
148 static void ilk_pch_clock_get(struct intel_crtc *crtc,
149                               struct intel_crtc_state *pipe_config);
150
151 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
152                                   struct drm_i915_gem_object *obj,
153                                   struct drm_mode_fb_cmd2 *mode_cmd);
154 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
155 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
156 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
157                                          const struct intel_link_m_n *m_n,
158                                          const struct intel_link_m_n *m2_n2);
159 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
160 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
161 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
162 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
163 static void vlv_prepare_pll(struct intel_crtc *crtc,
164                             const struct intel_crtc_state *pipe_config);
165 static void chv_prepare_pll(struct intel_crtc *crtc,
166                             const struct intel_crtc_state *pipe_config);
167 static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
168 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
169 static void intel_modeset_setup_hw_state(struct drm_device *dev,
170                                          struct drm_modeset_acquire_ctx *ctx);
171 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
172
/*
 * Legal DPLL divider ranges for a given platform/output combination.
 * The dot/vco ranges are clock frequencies in kHz; the remaining fields
 * are raw divider value ranges.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	/*
	 * p2 divider selection: p2_slow is used below dot_limit (kHz),
	 * p2_fast at or above it — see i9xx_select_p2_div().
	 */
	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
183
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	/* The fuse field indexes the per-SKU VCO table (values in MHz) */
	return vco_freq[hpll_freq] * 1000;
}
195
/*
 * Read the current rate of the CCK clock controlled by @reg, in the
 * same units as @ref_freq. @name is only used for the warning message.
 */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	/*
	 * The status field should mirror the programmed divider; a
	 * mismatch means a frequency change is still in progress.
	 */
	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	/* rate = 2 * ref / (divider + 1), rounded to closest */
	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
211
/*
 * Like vlv_get_cck_clock(), but uses the HPLL VCO as the reference
 * clock, reading it from the fuses (and caching it) on first use.
 */
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	/* Cache the fused HPLL VCO frequency on first use */
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}
228
/* Cache the CZ clock rate in dev_priv; only VLV/CHV have a CZ clock. */
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}
240
241 static inline u32 /* units of 100MHz */
242 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
243                     const struct intel_crtc_state *pipe_config)
244 {
245         if (HAS_DDI(dev_priv))
246                 return pipe_config->port_clock; /* SPLL */
247         else
248                 return dev_priv->fdi_pll_freq;
249 }
250
/* gen2 (i8xx) DPLL limits for DAC outputs */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* gen2 (i8xx) DPLL limits for DVO outputs; only p2_fast differs from DAC */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* gen2 (i8xx) DPLL limits for LVDS; p2 slow/fast = single/dual channel */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};
289
/* i9xx DPLL limits for SDVO/DAC outputs */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* i9xx DPLL limits for LVDS; p2 slow/fast = single/dual channel */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
315
316
/* g4x DPLL limits for SDVO outputs */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* g4x DPLL limits for HDMI/DP outputs */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* g4x DPLL limits for single-channel LVDS (p2 fixed at 14) */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* g4x DPLL limits for dual-channel LVDS (p2 fixed at 7) */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
372
/* Pineview DPLL limits for SDVO/DAC outputs */
static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview DPLL limits for LVDS (p2 fixed at 14) */
static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
400
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* ILK/SNB single-channel LVDS limits (p2 fixed at 14) */
static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ILK/SNB dual-channel LVDS limits (p2 fixed at 7) */
static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ILK/SNB dual-channel LVDS limits with 100MHz refclk */
static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
471
static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 is stored as a 24.22 fixed point value (see chv_calc_dpll_params()) */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
515
516 /* WA Display #0827: Gen9:all */
517 static void
518 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
519 {
520         if (enable)
521                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
522                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
523         else
524                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
525                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
526 }
527
528 /* Wa_2006604312:icl */
529 static void
530 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
531                        bool enable)
532 {
533         if (enable)
534                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
535                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
536         else
537                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
538                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
539 }
540
/* Whether the uapi state requires a full modeset on this crtc. */
static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->uapi);
}
546
547 bool
548 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
549 {
550         return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
551                 crtc_state->sync_mode_slaves_mask);
552 }
553
/* Whether this crtc is a port sync slave (i.e. has a master transcoder). */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
559
560 /*
561  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
562  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
563  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
564  * The helpers' return value is the rate of the clock that is fed to the
565  * display engine's pipe which can be the above fast dot clock rate or a
566  * divided-down version of it.
567  */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	/* Pineview has a single combined m divider (m2) with a +2 hw offset */
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	/* refuse bogus dividers rather than dividing by zero */
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
580
/* Effective i9xx m divider: 5 * (m1 + 2) + (m2 + 2) (hw adds 2 to each field) */
static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
585
/* Fill in m/p/vco/dot for i9xx from the raw dividers; returns the dot clock. */
static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	/* n has a +2 hw offset; refuse values that would divide by zero */
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
597
/* Fill in m/p/vco/dot for VLV; dot is the fast clock / 5 (see intel_limits_vlv). */
static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	/* the limits are in fast-clock (5x) units */
	return clock->dot / 5;
}
609
/* Fill in m/p/vco/dot for CHV; m2 is a 24.22 fixed point value (see intel_limits_chv). */
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	/* 64-bit math: m carries 22 fractional bits, cancelled by n << 22 */
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	/* the limits are in fast-clock (5x) units */
	return clock->dot / 5;
}
622
/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		return false;
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		return false;
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		return false;
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		return false;

	/* m1 > m2 is required on everything except PNV/VLV/CHV/GEN9_LP */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			return false;

	/* VLV/CHV/GEN9_LP have no separate combined m/p limits */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			return false;
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			return false;
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		return false;
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		return false;

	return true;
}
663
664 static int
665 i9xx_select_p2_div(const struct intel_limit *limit,
666                    const struct intel_crtc_state *crtc_state,
667                    int target)
668 {
669         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
670
671         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
672                 /*
673                  * For LVDS just rely on its current settings for dual-channel.
674                  * We haven't figured out how to reliably set up different
675                  * single/dual channel state, if we even can.
676                  */
677                 if (intel_is_dual_link_lvds(dev_priv))
678                         return limit->p2.p2_fast;
679                 else
680                         return limit->p2.p2_slow;
681         } else {
682                 if (target < limit->p2.dot_limit)
683                         return limit->p2.p2_slow;
684                 else
685                         return limit->p2.p2_fast;
686         }
687 }
688
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;	/* smallest deviation found so far */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustively walk the m1/m2/n/p1 divider space */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* i9xx requires m1 > m2 (see intel_pll_is_valid()) */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					/* P must match for LVDS downclocking */
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* keep the candidate closest to target */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff at least one valid candidate improved on the initial error */
	return (err != target);
}
746
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;	/* smallest deviation found so far */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Same exhaustive walk as i9xx_find_best_dpll(), but using the
	 * Pineview clock equation and without the m1 > m2 constraint
	 * (m1 is reserved as 0 on Pineview).
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					/* P must match for LVDS downclocking */
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* keep the candidate closest to target */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff at least one valid candidate improved on the initial error */
	return (err != target);
}
802
803 /*
804  * Returns a set of divisors for the desired target clock with the given
805  * refclk, or FALSE.  The returned values represent the clock equation:
806  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
807  *
808  * Target and reference clocks are specified in kHz.
809  *
810  * If match_clock is provided, then best_clock P divider must match the P
811  * divider from @match_clock used for LVDS downclocking.
812  */
813 static bool
814 g4x_find_best_dpll(const struct intel_limit *limit,
815                    struct intel_crtc_state *crtc_state,
816                    int target, int refclk, struct dpll *match_clock,
817                    struct dpll *best_clock)
818 {
819         struct drm_device *dev = crtc_state->uapi.crtc->dev;
820         struct dpll clock;
821         int max_n;
822         bool found = false;
823         /* approximately equals target * 0.00585 */
824         int err_most = (target >> 8) + (target >> 9);
825
826         memset(best_clock, 0, sizeof(*best_clock));
827
828         clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
829
830         max_n = limit->n.max;
831         /* based on hardware requirement, prefer smaller n to precision */
832         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
833                 /* based on hardware requirement, prefere larger m1,m2 */
834                 for (clock.m1 = limit->m1.max;
835                      clock.m1 >= limit->m1.min; clock.m1--) {
836                         for (clock.m2 = limit->m2.max;
837                              clock.m2 >= limit->m2.min; clock.m2--) {
838                                 for (clock.p1 = limit->p1.max;
839                                      clock.p1 >= limit->p1.min; clock.p1--) {
840                                         int this_err;
841
842                                         i9xx_calc_dpll_params(refclk, &clock);
843                                         if (!intel_pll_is_valid(to_i915(dev),
844                                                                 limit,
845                                                                 &clock))
846                                                 continue;
847
848                                         this_err = abs(clock.dot - target);
849                                         if (this_err < err_most) {
850                                                 *best_clock = clock;
851                                                 err_most = this_err;
852                                                 max_n = clock.n;
853                                                 found = true;
854                                         }
855                                 }
856                         }
857                 }
858         }
859         return found;
860 }
861
862 /*
863  * Check if the calculated PLL configuration is more optimal compared to the
864  * best configuration and error found so far. Return the calculated error.
865  */
866 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
867                                const struct dpll *calculated_clock,
868                                const struct dpll *best_clock,
869                                unsigned int best_error_ppm,
870                                unsigned int *error_ppm)
871 {
872         /*
873          * For CHV ignore the error and consider only the P value.
874          * Prefer a bigger P value based on HW requirements.
875          */
876         if (IS_CHERRYVIEW(to_i915(dev))) {
877                 *error_ppm = 0;
878
879                 return calculated_clock->p > best_clock->p;
880         }
881
882         if (drm_WARN_ON_ONCE(dev, !target_freq))
883                 return false;
884
885         *error_ppm = div_u64(1000000ULL *
886                                 abs(target_freq - calculated_clock->dot),
887                              target_freq);
888         /*
889          * Prefer a better P value over a better (smaller) error if the error
890          * is small. Ensure this preference for future configurations too by
891          * setting the error to 0.
892          */
893         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
894                 *error_ppm = 0;
895
896                 return true;
897         }
898
899         return *error_ppm + 10 < best_error_ppm;
900 }
901
902 /*
903  * Returns a set of divisors for the desired target clock with the given
904  * refclk, or FALSE.  The returned values represent the clock equation:
905  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
906  */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	/* The PLL is compared against 5x the port clock (@match_clock is unused here) */
	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* Solve for the m2 that would hit the target exactly,
					 * rounded to the nearest integer divider. */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					/* Keep only candidates that beat the current best */
					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
961
962 /*
963  * Returns a set of divisors for the desired target clock with the given
964  * refclk, or FALSE.  The returned values represent the clock equation:
965  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
966  */
967 static bool
968 chv_find_best_dpll(const struct intel_limit *limit,
969                    struct intel_crtc_state *crtc_state,
970                    int target, int refclk, struct dpll *match_clock,
971                    struct dpll *best_clock)
972 {
973         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
974         struct drm_device *dev = crtc->base.dev;
975         unsigned int best_error_ppm;
976         struct dpll clock;
977         u64 m2;
978         int found = false;
979
980         memset(best_clock, 0, sizeof(*best_clock));
981         best_error_ppm = 1000000;
982
983         /*
984          * Based on hardware doc, the n always set to 1, and m1 always
985          * set to 2.  If requires to support 200Mhz refclk, we need to
986          * revisit this because n may not 1 anymore.
987          */
988         clock.n = 1, clock.m1 = 2;
989         target *= 5;    /* fast clock */
990
991         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
992                 for (clock.p2 = limit->p2.p2_fast;
993                                 clock.p2 >= limit->p2.p2_slow;
994                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
995                         unsigned int error_ppm;
996
997                         clock.p = clock.p1 * clock.p2;
998
999                         m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
1000                                                    refclk * clock.m1);
1001
1002                         if (m2 > INT_MAX/clock.m1)
1003                                 continue;
1004
1005                         clock.m2 = m2;
1006
1007                         chv_calc_dpll_params(refclk, &clock);
1008
1009                         if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
1010                                 continue;
1011
1012                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1013                                                 best_error_ppm, &error_ppm))
1014                                 continue;
1015
1016                         *best_clock = clock;
1017                         best_error_ppm = error_ppm;
1018                         found = true;
1019                 }
1020         }
1021
1022         return found;
1023 }
1024
1025 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
1026                         struct dpll *best_clock)
1027 {
1028         int refclk = 100000;
1029         const struct intel_limit *limit = &intel_limits_bxt;
1030
1031         return chv_find_best_dpll(limit, crtc_state,
1032                                   crtc_state->port_clock, refclk,
1033                                   NULL, best_clock);
1034 }
1035
1036 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1037                                     enum pipe pipe)
1038 {
1039         i915_reg_t reg = PIPEDSL(pipe);
1040         u32 line1, line2;
1041         u32 line_mask;
1042
1043         if (IS_GEN(dev_priv, 2))
1044                 line_mask = DSL_LINEMASK_GEN2;
1045         else
1046                 line_mask = DSL_LINEMASK_GEN3;
1047
1048         line1 = intel_de_read(dev_priv, reg) & line_mask;
1049         msleep(5);
1050         line2 = intel_de_read(dev_priv, reg) & line_mask;
1051
1052         return line1 != line2;
1053 }
1054
/* Wait (up to 100ms) for the scanline counter to start/stop moving. */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}
1066
/* Wait for the scanline counter to stop moving (pipe shut down). */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1071
/* Wait for the scanline counter to start moving (pipe running). */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1076
/*
 * Wait for the pipe to actually turn off after it has been disabled.
 * Gen4+ exposes a PIPECONF status bit; older parts have to watch the
 * scanline counter stop instead.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			drm_WARN(&dev_priv->drm, 1,
				 "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1096
1097 /* Only for pre-ILK configs */
/* Warn if the per-pipe DPLL enable state doesn't match @state. */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	/* DPLL_VCO_ENABLE reflects whether the DPLL is running */
	val = intel_de_read(dev_priv, DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1110
1111 /* XXX: the dsi pll is shared between MIPI DSI ports */
/* Warn if the DSI PLL enable state doesn't match @state. */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* The DSI PLL control register lives behind the CCK sideband */
	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1126
/* Warn if the FDI TX enable state for @pipe doesn't match @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = intel_de_read(dev_priv,
					TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1151 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1152 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1153
1154 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1155                           enum pipe pipe, bool state)
1156 {
1157         u32 val;
1158         bool cur_state;
1159
1160         val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
1161         cur_state = !!(val & FDI_RX_ENABLE);
1162         I915_STATE_WARN(cur_state != state,
1163              "FDI RX state assertion failure (expected %s, current %s)\n",
1164                         onoff(state), onoff(cur_state));
1165 }
1166 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1167 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1168
/* Warn if the FDI TX PLL is disabled on platforms where we control it. */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1185
1186 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1187                        enum pipe pipe, bool state)
1188 {
1189         u32 val;
1190         bool cur_state;
1191
1192         val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
1193         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1194         I915_STATE_WARN(cur_state != state,
1195              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1196                         onoff(state), onoff(cur_state));
1197 }
1198
/*
 * Warn if the panel power sequencer has the pipe's registers locked.
 * Figures out which pipe the panel is on from the PPS port-select
 * bits (or directly from @pipe on VLV/CHV), then checks the lock state.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* Not applicable on DDI platforms */
	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		/* Determine which port (and hence pipe) the single PPS drives */
		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* On these platforms the panel is expected on LVDS only */
		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Regs count as unlocked when the panel is off or the unlock pattern is set */
	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1256
/* Warn if the transcoder's enable state doesn't match @state. */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	/*
	 * Only touch PIPECONF if the power well is up; with the power
	 * well down the pipe is treated as disabled.
	 */
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
1284
1285 static void assert_plane(struct intel_plane *plane, bool state)
1286 {
1287         enum pipe pipe;
1288         bool cur_state;
1289
1290         cur_state = plane->get_hw_state(plane, &pipe);
1291
1292         I915_STATE_WARN(cur_state != state,
1293                         "%s assertion failure (expected %s, current %s)\n",
1294                         plane->base.name, onoff(state), onoff(cur_state));
1295 }
1296
1297 #define assert_plane_enabled(p) assert_plane(p, true)
1298 #define assert_plane_disabled(p) assert_plane(p, false)
1299
1300 static void assert_planes_disabled(struct intel_crtc *crtc)
1301 {
1302         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1303         struct intel_plane *plane;
1304
1305         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1306                 assert_plane_disabled(plane);
1307 }
1308
/*
 * Sanity check that vblank interrupts are disabled on this crtc. If the
 * vblank get unexpectedly succeeds, warn and drop the reference it took.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1314
1315 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1316                                     enum pipe pipe)
1317 {
1318         u32 val;
1319         bool enabled;
1320
1321         val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
1322         enabled = !!(val & TRANS_ENABLE);
1323         I915_STATE_WARN(enabled,
1324              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1325              pipe_name(pipe));
1326 }
1327
/* Warn if the PCH DP port is enabled on @pipe (or misrouted on IBX). */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	/* On IBX even a disabled port must not be left selecting transcoder B */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
1345
/* Warn if the PCH HDMI port is enabled on @pipe (or misrouted on IBX). */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	/* On IBX even a disabled port must not be left selecting transcoder B */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}
1363
/* Warn if any PCH port (DP, VGA, LVDS, HDMI/SDVO) is enabled on @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1388
/* Write the DPLL control value and wait for the PLL to lock. */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	intel_de_posting_read(dev_priv, DPLL(pipe));
	/* Give the PLL time to spin up before polling for lock */
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
}
1402
/* Enable the VLV DPLL (if requested by the state) and program DPLL_MD. */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only spin the PLL up when the state actually wants the VCO on */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	intel_de_write(dev_priv, DPLL_MD(pipe),
		       pipe_config->dpll_hw_state.dpll_md);
	intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
1421
1422
/* Enable the 10bit DPIO clock, then enable the CHV PLL and wait for lock. */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
}
1452
/* Enable the CHV DPLL and program DPLL_MD, applying the pipe B/C workaround. */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only spin the PLL up when the state actually wants the VCO on */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		intel_de_write(dev_priv, DPLL_MD(PIPE_B),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_write(dev_priv, CBR4_VLV, 0);
		/* Cache the value since DPLL_MD(pipe) can't be read back here */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		drm_WARN_ON(&dev_priv->drm,
			    (intel_de_read(dev_priv, DPLL(PIPE_B)) &
			     DPLL_VGA_MODE_DIS) == 0);
	} else {
		intel_de_write(dev_priv, DPLL_MD(pipe),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_posting_read(dev_priv, DPLL_MD(pipe));
	}
}
1493
1494 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1495 {
1496         if (IS_I830(dev_priv))
1497                 return false;
1498
1499         return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1500 }
1501
/* Enable the gen2-4 DPLL, following the hardware-mandated write sequence. */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, reg, dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
			       crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		intel_de_write(dev_priv, reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, reg, dpll);
		intel_de_posting_read(dev_priv, reg);
		udelay(150); /* wait for warmup */
	}
}
1547
/* Disable the gen2-4 DPLL, leaving only VGA mode disable set. */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
1564
/*
 * Disable the VLV pipe DPLL while keeping the reference clock enabled
 * (and the CRI clock on pipes other than A). Pipe must be disabled first.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	/* Keep the ref clock running; drop everything else */
	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
1580
/*
 * Disable the CHV pipe DPLL while keeping the SSC reference clock enabled
 * (and the CRI clock on pipes other than A), then turn off the 10bit
 * clock feed to the display controller via the DPIO sideband.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}
1606
/*
 * Poll (up to 1 ms) for the PHY "port ready" status bits of @dport to
 * reach @expected_mask, warning on timeout. Ports B and C report status
 * in DPLL(0) (port C's bits shifted up by 4); port D reports in
 * DPIO_PHY_STATUS.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C status bits sit 4 bits above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dport->base.base.base.id, dport->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
1640
/*
 * Enable the PCH transcoder for @crtc_state's pipe. Requires the shared
 * PCH DPLL and FDI TX/RX to be running already. Programs the CPT timing
 * override workaround, frame start delay, BPC (IBX) and interlace mode
 * to match the CPU pipe, then sets TRANS_ENABLE and waits for the
 * transcoder to report enabled.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		/* IBX SDVO uses the legacy interlaced mode */
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}
1707
/*
 * Enable the single LPT PCH transcoder (wired to pipe A) feeding
 * @cpu_transcoder. Requires FDI TX/RX to be running. Applies the
 * timing-override workaround and matches the CPU's frame start delay
 * and interlace mode before enabling.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
1739
/*
 * Disable the PCH transcoder for @pipe. FDI and the PCH ports must
 * already be off. Waits for the transcoder to report disabled, then
 * clears the CPT timing-override workaround bit again.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}
1770
/*
 * Disable the single LPT PCH transcoder, wait for it to report
 * disabled, and clear the timing-override workaround bit.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
1788
1789 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1790 {
1791         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1792
1793         if (HAS_PCH_LPT(dev_priv))
1794                 return PIPE_A;
1795         else
1796                 return crtc->pipe;
1797 }
1798
1799 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1800 {
1801         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1802
1803         /*
1804          * On i965gm the hardware frame counter reads
1805          * zero when the TV encoder is enabled :(
1806          */
1807         if (IS_I965GM(dev_priv) &&
1808             (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1809                 return 0;
1810
1811         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1812                 return 0xffffffff; /* full 32 bit counter */
1813         else if (INTEL_GEN(dev_priv) >= 3)
1814                 return 0xffffff; /* only 24 bits of frame count */
1815         else
1816                 return 0; /* Gen2 doesn't have a hardware frame counter */
1817 }
1818
/*
 * Enable vblank processing for the crtc: program the max hardware
 * frame counter value first, then turn vblanks on. Vblanks must
 * currently be disabled.
 */
void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	assert_vblank_disabled(&crtc->base);
	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);
}
1828
/* Disable vblank processing for the crtc and assert it took effect. */
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	drm_crtc_vblank_off(&crtc->base);
	assert_vblank_disabled(&crtc->base);
}
1836
/*
 * Enable the pipe for @new_crtc_state. Planes must be disabled, and
 * the relevant PLL (GMCH) or FDI (PCH encoders) must already be
 * running. On platforms without a hardware frame counter, waits for
 * the scanline to start moving so vblank timestamps are sane.
 */
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1894
/*
 * Disable the pipe for @old_crtc_state. Planes must already be
 * disabled. On I830 the pipe is kept enabled (only double wide is
 * cleared); otherwise waits for the pipe to actually turn off.
 */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	intel_de_write(dev_priv, reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1934
/* GTT tile size in bytes: 2KiB on gen2, 4KiB on everything later. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1939
1940 static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
1941 {
1942         if (!is_ccs_modifier(fb->modifier))
1943                 return false;
1944
1945         return plane >= fb->format->num_planes / 2;
1946 }
1947
1948 static bool is_gen12_ccs_modifier(u64 modifier)
1949 {
1950         return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
1951                modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
1952
1953 }
1954
1955 static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
1956 {
1957         return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
1958 }
1959
1960 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
1961 {
1962         if (is_ccs_modifier(fb->modifier))
1963                 return is_ccs_plane(fb, plane);
1964
1965         return plane == 1;
1966 }
1967
1968 static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
1969 {
1970         WARN_ON(!is_ccs_modifier(fb->modifier) ||
1971                 (main_plane && main_plane >= fb->format->num_planes / 2));
1972
1973         return fb->format->num_planes / 2 + main_plane;
1974 }
1975
1976 static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
1977 {
1978         WARN_ON(!is_ccs_modifier(fb->modifier) ||
1979                 ccs_plane < fb->format->num_planes / 2);
1980
1981         return ccs_plane - fb->format->num_planes / 2;
1982 }
1983
1984 /* Return either the main plane's CCS or - if not a CCS FB - UV plane */
1985 int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
1986 {
1987         if (is_ccs_modifier(fb->modifier))
1988                 return main_to_ccs_plane(fb, main_plane);
1989
1990         return 1;
1991 }
1992
1993 bool
1994 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
1995                                     uint64_t modifier)
1996 {
1997         return info->is_yuv &&
1998                info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
1999 }
2000
2001 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
2002                                    int color_plane)
2003 {
2004         return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
2005                color_plane == 1;
2006 }
2007
/*
 * Return the tile width in bytes for @color_plane of @fb, based on the
 * tiling modifier, platform and (for Yf) the format's bytes per pixel.
 * Linear buffers report the full GTT tile size. Note the deliberate
 * fall-throughs: CCS planes get special widths, the main planes then
 * fall through to the base tiling case.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 64;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
2060
2061 static unsigned int
2062 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
2063 {
2064         if (is_gen12_ccs_plane(fb, color_plane))
2065                 return 1;
2066
2067         return intel_tile_size(to_i915(fb->dev)) /
2068                 intel_tile_width_bytes(fb, color_plane);
2069 }
2070
2071 /* Return the tile dimensions in pixel units */
2072 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
2073                             unsigned int *tile_width,
2074                             unsigned int *tile_height)
2075 {
2076         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
2077         unsigned int cpp = fb->format->cpp[color_plane];
2078
2079         *tile_width = tile_width_bytes / cpp;
2080         *tile_height = intel_tile_height(fb, color_plane);
2081 }
2082
2083 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
2084                                         int color_plane)
2085 {
2086         unsigned int tile_width, tile_height;
2087
2088         intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2089
2090         return fb->pitches[color_plane] * tile_height;
2091 }
2092
/* Round @height up to a whole number of tile rows for @color_plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
2101
2102 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2103 {
2104         unsigned int size = 0;
2105         int i;
2106
2107         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2108                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2109
2110         return size;
2111 }
2112
2113 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2114 {
2115         unsigned int size = 0;
2116         int i;
2117
2118         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2119                 size += rem_info->plane[i].width * rem_info->plane[i].height;
2120
2121         return size;
2122 }
2123
2124 static void
2125 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2126                         const struct drm_framebuffer *fb,
2127                         unsigned int rotation)
2128 {
2129         view->type = I915_GGTT_VIEW_NORMAL;
2130         if (drm_rotation_90_or_270(rotation)) {
2131                 view->type = I915_GGTT_VIEW_ROTATED;
2132                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2133         }
2134 }
2135
/* Per-platform alignment requirement for the cursor surface base. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2147
/* Per-platform alignment requirement for linear framebuffers. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2160
/*
 * Return the required GGTT alignment in bytes for @color_plane of
 * @fb, based on modifier, platform and plane role. Note the
 * deliberate fall-throughs between the CCS/Y/Yf cases.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
	    is_ccs_plane(fb, color_plane))
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		/* Fall-through */
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
		if (INTEL_GEN(dev_priv) >= 12 &&
		    is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		/* Fall-through */
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
2198
2199 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2200 {
2201         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2202         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2203
2204         return INTEL_GEN(dev_priv) < 4 ||
2205                 (plane->has_fbc &&
2206                  plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2207 }
2208
/*
 * Pin @fb's backing object into the GGTT for scanout through @view
 * and, when @uses_fence allows it, install a fence (recording
 * PLANE_HAS_FENCE in @out_flags). A runtime PM reference is held only
 * around the pin/fence operations. Returns the pinned vma with a
 * reference held by the caller, or an ERR_PTR on failure.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/* alignment of the first (main) color plane governs the pin */
	alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* pre-gen4 cannot scan out without a fence */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* hand the caller its own reference on the vma */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
2303
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (if @flags has
 * PLANE_HAS_FENCE), unpin from the display plane under the object
 * lock, and drop the caller's vma reference.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}
2314
2315 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2316                           unsigned int rotation)
2317 {
2318         if (drm_rotation_90_or_270(rotation))
2319                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2320         else
2321                 return fb->pitches[color_plane];
2322 }
2323
2324 /*
2325  * Convert the x/y offsets into a linear offset.
2326  * Only valid with 0/180 degree rotation, which is fine since linear
2327  * offset is only used with linear buffers on pre-hsw and tiled buffers
2328  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2329  */
2330 u32 intel_fb_xy_to_linear(int x, int y,
2331                           const struct intel_plane_state *state,
2332                           int color_plane)
2333 {
2334         const struct drm_framebuffer *fb = state->hw.fb;
2335         unsigned int cpp = fb->format->cpp[color_plane];
2336         unsigned int pitch = state->color_plane[color_plane].stride;
2337
2338         return y * pitch + x * cpp;
2339 }
2340
2341 /*
2342  * Add the x/y offsets derived from fb->offsets[] to the user
2343  * specified plane src x/y offsets. The resulting x/y offsets
2344  * specify the start of scanout from the beginning of the gtt mapping.
2345  */
2346 void intel_add_fb_offsets(int *x, int *y,
2347                           const struct intel_plane_state *state,
2348                           int color_plane)
2349
2350 {
2351         *x += state->color_plane[color_plane].x;
2352         *y += state->color_plane[color_plane].y;
2353 }
2354
/*
 * Move the difference between two tile-size aligned offsets
 * (old_offset - new_offset) into the x/y pixel offsets, allowing
 * scanout to start from the smaller (aligned) base offset.
 *
 * x/y are updated in place. pitch_tiles is the surface pitch in tiles,
 * tile_width/tile_height are in pixels. Returns new_offset unchanged.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	/* both offsets must be tile aligned and we may only move backwards */
	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	tiles = (old_offset - new_offset) / tile_size;

	/* spread the tile delta over full tile rows and remaining columns */
	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}
2381
2382 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
2383 {
2384         return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
2385                is_gen12_ccs_plane(fb, color_plane);
2386 }
2387
/*
 * Fold the difference between two aligned surface offsets
 * (old_offset - new_offset) into the x/y offsets, for both tiled and
 * linear surfaces. Returns new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	/* we only ever move the base offset backwards */
	drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* for the rotated view pitch is in tile rows */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* linear: convert to an absolute byte offset, then back to x/y */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2426
2427 /*
2428  * Adjust the tile offset by moving the difference into
2429  * the x/y offsets.
2430  */
2431 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2432                                              const struct intel_plane_state *state,
2433                                              int color_plane,
2434                                              u32 old_offset, u32 new_offset)
2435 {
2436         return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
2437                                            state->hw.rotation,
2438                                            state->color_plane[color_plane].stride,
2439                                            old_offset, new_offset);
2440 }
2441
/*
 * Computes the aligned offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * This function is used when computing the derived information
 * under intel_framebuffer, so using any of that information
 * here is not allowed. Anything under drm_framebuffer can be
 * used. This is why the user has to pass in the pitch since it
 * is specified in the rotated orientation.
 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* rotated view: pitch counts tile rows */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* split x/y into whole tiles and intra-tile remainders */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;

		/* round the base down to the requested alignment ... */
		offset_aligned = offset;
		if (alignment)
			offset_aligned = rounddown(offset_aligned, alignment);

		/* ... and push the rounding remainder back into x/y */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset;
		if (alignment) {
			offset_aligned = rounddown(offset_aligned, alignment);
			/* remainder of the rounding becomes the new x/y */
			*y = (offset % alignment) / pitch;
			*x = ((offset % alignment) - *y * pitch) / cpp;
		} else {
			*y = *x = 0;
		}
	}

	return offset_aligned;
}
2510
2511 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2512                                               const struct intel_plane_state *state,
2513                                               int color_plane)
2514 {
2515         struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
2516         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2517         const struct drm_framebuffer *fb = state->hw.fb;
2518         unsigned int rotation = state->hw.rotation;
2519         int pitch = state->color_plane[color_plane].stride;
2520         u32 alignment;
2521
2522         if (intel_plane->id == PLANE_CURSOR)
2523                 alignment = intel_cursor_alignment(dev_priv);
2524         else
2525                 alignment = intel_surf_alignment(fb, color_plane);
2526
2527         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2528                                             pitch, rotation, alignment);
2529 }
2530
2531 /* Convert the fb->offset[] into x/y offsets */
2532 static int intel_fb_offset_to_xy(int *x, int *y,
2533                                  const struct drm_framebuffer *fb,
2534                                  int color_plane)
2535 {
2536         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2537         unsigned int height;
2538         u32 alignment;
2539
2540         if (INTEL_GEN(dev_priv) >= 12 &&
2541             is_semiplanar_uv_plane(fb, color_plane))
2542                 alignment = intel_tile_row_size(fb, color_plane);
2543         else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
2544                 alignment = intel_tile_size(dev_priv);
2545         else
2546                 alignment = 0;
2547
2548         if (alignment != 0 && fb->offsets[color_plane] % alignment) {
2549                 drm_dbg_kms(&dev_priv->drm,
2550                             "Misaligned offset 0x%08x for color plane %d\n",
2551                             fb->offsets[color_plane], color_plane);
2552                 return -EINVAL;
2553         }
2554
2555         height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2556         height = ALIGN(height, intel_tile_height(fb, color_plane));
2557
2558         /* Catch potential overflows early */
2559         if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2560                             fb->offsets[color_plane])) {
2561                 drm_dbg_kms(&dev_priv->drm,
2562                             "Bad offset 0x%08x or pitch %d for color plane %d\n",
2563                             fb->offsets[color_plane], fb->pitches[color_plane],
2564                             color_plane);
2565                 return -ERANGE;
2566         }
2567
2568         *x = 0;
2569         *y = 0;
2570
2571         intel_adjust_aligned_offset(x, y,
2572                                     fb, color_plane, DRM_MODE_ROTATE_0,
2573                                     fb->pitches[color_plane],
2574                                     fb->offsets[color_plane], 0);
2575
2576         return 0;
2577 }
2578
2579 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2580 {
2581         switch (fb_modifier) {
2582         case I915_FORMAT_MOD_X_TILED:
2583                 return I915_TILING_X;
2584         case I915_FORMAT_MOD_Y_TILED:
2585         case I915_FORMAT_MOD_Y_TILED_CCS:
2586         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2587         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2588                 return I915_TILING_Y;
2589         default:
2590                 return I915_TILING_NONE;
2591         }
2592 }
2593
/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 *  the cache-line pairs. The compression state of the cache-line pair
 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 *  cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
static const struct drm_format_info skl_ccs_formats[] = {
	/* plane 0: 4 Bpp main surface, plane 1: 1 Bpp CCS (hsub 8, vsub 16) */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
2618
/*
 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
 * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
 * the main surface.
 */
static const struct drm_format_info gen12_ccs_formats[] = {
	/* RGB formats: plane 0 main surface, plane 1 CCS AUX */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	/* packed YUV: same single-main-plane + CCS AUX layout */
	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	/*
	 * Semiplanar YUV: 4 planes total; NOTE(review): planes 2/3 appear to
	 * be the CCS AUX planes for the Y and UV planes — confirm against
	 * is_ccs_plane()/ccs_to_main_plane() mapping.
	 */
	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P010, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P012, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P016, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
};
2664
2665 static const struct drm_format_info *
2666 lookup_format_info(const struct drm_format_info formats[],
2667                    int num_formats, u32 format)
2668 {
2669         int i;
2670
2671         for (i = 0; i < num_formats; i++) {
2672                 if (formats[i].format == format)
2673                         return &formats[i];
2674         }
2675
2676         return NULL;
2677 }
2678
2679 static const struct drm_format_info *
2680 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2681 {
2682         switch (cmd->modifier[0]) {
2683         case I915_FORMAT_MOD_Y_TILED_CCS:
2684         case I915_FORMAT_MOD_Yf_TILED_CCS:
2685                 return lookup_format_info(skl_ccs_formats,
2686                                           ARRAY_SIZE(skl_ccs_formats),
2687                                           cmd->pixel_format);
2688         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2689         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2690                 return lookup_format_info(gen12_ccs_formats,
2691                                           ARRAY_SIZE(gen12_ccs_formats),
2692                                           cmd->pixel_format);
2693         default:
2694                 return NULL;
2695         }
2696 }
2697
2698 bool is_ccs_modifier(u64 modifier)
2699 {
2700         return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
2701                modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
2702                modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2703                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2704 }
2705
2706 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
2707 {
2708         return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
2709                             512) * 64;
2710 }
2711
2712 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2713                               u32 pixel_format, u64 modifier)
2714 {
2715         struct intel_crtc *crtc;
2716         struct intel_plane *plane;
2717
2718         /*
2719          * We assume the primary plane for pipe A has
2720          * the highest stride limits of them all,
2721          * if in case pipe A is disabled, use the first pipe from pipe_mask.
2722          */
2723         crtc = intel_get_first_crtc(dev_priv);
2724         if (!crtc)
2725                 return 0;
2726
2727         plane = to_intel_plane(crtc->base.primary);
2728
2729         return plane->max_stride(plane, pixel_format, modifier,
2730                                  DRM_MODE_ROTATE_0);
2731 }
2732
2733 static
2734 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2735                         u32 pixel_format, u64 modifier)
2736 {
2737         /*
2738          * Arbitrary limit for gen4+ chosen to match the
2739          * render engine max stride.
2740          *
2741          * The new CCS hash mode makes remapping impossible
2742          */
2743         if (!is_ccs_modifier(modifier)) {
2744                 if (INTEL_GEN(dev_priv) >= 7)
2745                         return 256*1024;
2746                 else if (INTEL_GEN(dev_priv) >= 4)
2747                         return 128*1024;
2748         }
2749
2750         return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2751 }
2752
2753 static u32
2754 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2755 {
2756         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2757         u32 tile_width;
2758
2759         if (is_surface_linear(fb, color_plane)) {
2760                 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2761                                                            fb->format->format,
2762                                                            fb->modifier);
2763
2764                 /*
2765                  * To make remapping with linear generally feasible
2766                  * we need the stride to be page aligned.
2767                  */
2768                 if (fb->pitches[color_plane] > max_stride &&
2769                     !is_ccs_modifier(fb->modifier))
2770                         return intel_tile_size(dev_priv);
2771                 else
2772                         return 64;
2773         }
2774
2775         tile_width = intel_tile_width_bytes(fb, color_plane);
2776         if (is_ccs_modifier(fb->modifier)) {
2777                 /*
2778                  * Display WA #0531: skl,bxt,kbl,glk
2779                  *
2780                  * Render decompression and plane width > 3840
2781                  * combined with horizontal panning requires the
2782                  * plane stride to be a multiple of 4. We'll just
2783                  * require the entire fb to accommodate that to avoid
2784                  * potential runtime errors at plane configuration time.
2785                  */
2786                 if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
2787                         tile_width *= 4;
2788                 /*
2789                  * The main surface pitch must be padded to a multiple of four
2790                  * tile widths.
2791                  */
2792                 else if (INTEL_GEN(dev_priv) >= 12)
2793                         tile_width *= 4;
2794         }
2795         return tile_width;
2796 }
2797
2798 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2799 {
2800         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2801         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2802         const struct drm_framebuffer *fb = plane_state->hw.fb;
2803         int i;
2804
2805         /* We don't want to deal with remapping with cursors */
2806         if (plane->id == PLANE_CURSOR)
2807                 return false;
2808
2809         /*
2810          * The display engine limits already match/exceed the
2811          * render engine limits, so not much point in remapping.
2812          * Would also need to deal with the fence POT alignment
2813          * and gen2 2KiB GTT tile size.
2814          */
2815         if (INTEL_GEN(dev_priv) < 4)
2816                 return false;
2817
2818         /*
2819          * The new CCS hash mode isn't compatible with remapping as
2820          * the virtual address of the pages affects the compressed data.
2821          */
2822         if (is_ccs_modifier(fb->modifier))
2823                 return false;
2824
2825         /* Linear needs a page aligned stride for remapping */
2826         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2827                 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2828
2829                 for (i = 0; i < fb->format->num_planes; i++) {
2830                         if (fb->pitches[i] & alignment)
2831                                 return false;
2832                 }
2833         }
2834
2835         return true;
2836 }
2837
2838 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2839 {
2840         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2841         const struct drm_framebuffer *fb = plane_state->hw.fb;
2842         unsigned int rotation = plane_state->hw.rotation;
2843         u32 stride, max_stride;
2844
2845         /*
2846          * No remapping for invisible planes since we don't have
2847          * an actual source viewport to remap.
2848          */
2849         if (!plane_state->uapi.visible)
2850                 return false;
2851
2852         if (!intel_plane_can_remap(plane_state))
2853                 return false;
2854
2855         /*
2856          * FIXME: aux plane limits on gen9+ are
2857          * unclear in Bspec, for now no checking.
2858          */
2859         stride = intel_fb_pitch(fb, 0, rotation);
2860         max_stride = plane->max_stride(plane, fb->format->format,
2861                                        fb->modifier, rotation);
2862
2863         return stride > max_stride;
2864 }
2865
2866 static void
2867 intel_fb_plane_get_subsampling(int *hsub, int *vsub,
2868                                const struct drm_framebuffer *fb,
2869                                int color_plane)
2870 {
2871         int main_plane;
2872
2873         if (color_plane == 0) {
2874                 *hsub = 1;
2875                 *vsub = 1;
2876
2877                 return;
2878         }
2879
2880         /*
2881          * TODO: Deduct the subsampling from the char block for all CCS
2882          * formats and planes.
2883          */
2884         if (!is_gen12_ccs_plane(fb, color_plane)) {
2885                 *hsub = fb->format->hsub;
2886                 *vsub = fb->format->vsub;
2887
2888                 return;
2889         }
2890
2891         main_plane = ccs_to_main_plane(fb, color_plane);
2892         *hsub = drm_format_info_block_width(fb->format, color_plane) /
2893                 drm_format_info_block_width(fb->format, main_plane);
2894
2895         /*
2896          * The min stride check in the core framebuffer_check() function
2897          * assumes that format->hsub applies to every plane except for the
2898          * first plane. That's incorrect for the CCS AUX plane of the first
2899          * plane, but for the above check to pass we must define the block
2900          * width with that subsampling applied to it. Adjust the width here
2901          * accordingly, so we can calculate the actual subsampling factor.
2902          */
2903         if (main_plane == 0)
2904                 *hsub *= fb->format->hsub;
2905
2906         *vsub = 32;
2907 }
2908 static int
2909 intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
2910 {
2911         struct drm_i915_private *i915 = to_i915(fb->dev);
2912         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2913         int main_plane;
2914         int hsub, vsub;
2915         int tile_width, tile_height;
2916         int ccs_x, ccs_y;
2917         int main_x, main_y;
2918
2919         if (!is_ccs_plane(fb, ccs_plane))
2920                 return 0;
2921
2922         intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
2923         intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
2924
2925         tile_width *= hsub;
2926         tile_height *= vsub;
2927
2928         ccs_x = (x * hsub) % tile_width;
2929         ccs_y = (y * vsub) % tile_height;
2930
2931         main_plane = ccs_to_main_plane(fb, ccs_plane);
2932         main_x = intel_fb->normal[main_plane].x % tile_width;
2933         main_y = intel_fb->normal[main_plane].y % tile_height;
2934
2935         /*
2936          * CCS doesn't have its own x/y offset register, so the intra CCS tile
2937          * x/y offsets must match between CCS and the main surface.
2938          */
2939         if (main_x != ccs_x || main_y != ccs_y) {
2940                 drm_dbg_kms(&i915->drm,
2941                               "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2942                               main_x, main_y,
2943                               ccs_x, ccs_y,
2944                               intel_fb->normal[main_plane].x,
2945                               intel_fb->normal[main_plane].y,
2946                               x, y);
2947                 return -EINVAL;
2948         }
2949
2950         return 0;
2951 }
2952
2953 static void
2954 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
2955 {
2956         int main_plane = is_ccs_plane(fb, color_plane) ?
2957                          ccs_to_main_plane(fb, color_plane) : 0;
2958         int main_hsub, main_vsub;
2959         int hsub, vsub;
2960
2961         intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
2962         intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
2963         *w = fb->width / main_hsub / hsub;
2964         *h = fb->height / main_vsub / vsub;
2965 }
2966
/*
 * Setup the rotated view for an FB plane and return the size the GTT mapping
 * requires for this view.
 *
 * Returns 0 (no rotated view) unless the fb uses a Y/Yf tiling modifier;
 * otherwise returns the plane size in tiles for the rotated mapping.
 */
static u32
setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
		  u32 gtt_offset_rotated, int x, int y,
		  unsigned int width, unsigned int height,
		  unsigned int tile_size,
		  unsigned int tile_width, unsigned int tile_height,
		  struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	unsigned int pitch_tiles;
	struct drm_rect r;

	/* Y or Yf modifiers required for 90/270 rotation */
	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
		return 0;

	if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane)))
		return 0;

	rot_info->plane[plane] = *plane_info;

	/* rotated pitch is the tile-aligned plane height */
	intel_fb->rotated[plane].pitch = plane_info->height * tile_height;

	/* rotate the x/y offsets to match the GTT view */
	drm_rect_init(&r, x, y, width, height);
	drm_rect_rotate(&r,
			plane_info->width * tile_width,
			plane_info->height * tile_height,
			DRM_MODE_ROTATE_270);
	x = r.x1;
	y = r.y1;

	/* rotate the tile dimensions to match the GTT view */
	pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
	swap(tile_width, tile_height);

	/*
	 * We only keep the x/y offsets, so push all of the
	 * gtt offset into the x/y offsets.
	 */
	intel_adjust_tile_offset(&x, &y,
				 tile_width, tile_height,
				 tile_size, pitch_tiles,
				 gtt_offset_rotated * tile_size, 0);

	/*
	 * First pixel of the framebuffer from
	 * the start of the rotated gtt mapping.
	 */
	intel_fb->rotated[plane].x = x;
	intel_fb->rotated[plane].y = y;

	/* size of this plane's rotated mapping, in tiles */
	return plane_info->width * plane_info->height;
}
3027
/*
 * Validate the layout of all color planes of @fb and cache, in the
 * enclosing intel_framebuffer, the x/y coordinates of each plane's first
 * pixel within the normal (unrotated) GTT view.  For tiled (non-linear)
 * surfaces this also builds the per-plane geometry for the rotated GTT
 * view via setup_fb_rotation().
 *
 * Returns 0 on success, or a negative error code if a plane offset is
 * invalid, the CCS x/y check fails, or the backing object is too small
 * to hold the fb layout.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		intel_fb_plane_dims(&width, &height, fb, i);

		/* Convert the plane's byte offset into x/y pixel coordinates. */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				    i, fb->offsets[i]);
			return ret;
		}

		ret = intel_fb_check_ccs_xy(fb, i, x, y);
		if (ret)
			return ret;

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				     i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		/* Tile-aligned start of the plane, converted to whole tiles. */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size;

		if (!is_surface_linear(fb, i)) {
			struct intel_remapped_plane_info plane_info;
			unsigned int tile_width, tile_height;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			/* Plane geometry expressed in units of whole tiles. */
			plane_info.offset = offset;
			plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
							 tile_width * cpp);
			plane_info.width = DIV_ROUND_UP(x + width, tile_width);
			plane_info.height = DIV_ROUND_UP(y + height,
							 tile_height);

			/* how many tiles does this plane need */
			size = plane_info.stride * plane_info.height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* Lay this plane out in the rotated GTT view. */
			gtt_offset_rotated +=
				setup_fb_rotation(i, &plane_info,
						  gtt_offset_rotated,
						  x, y, width, height,
						  tile_size,
						  tile_width, tile_height,
						  fb);
		} else {
			/* Linear surface: size in tile_size units, rounded up. */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	/* Reject fbs whose layout doesn't fit inside the backing object. */
	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		drm_dbg_kms(&dev_priv->drm,
			    "fb too big for bo (need %llu bytes, have %zu bytes)\n",
			    mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
3138
/*
 * Build a remapped/rotated GGTT view for @plane_state and compute the
 * per-color-plane stride and x/y offsets within that view.  Used when the
 * normal GTT mapping cannot be scanned out directly (see
 * intel_plane_needs_remap()); the view type is ROTATED for 90/270 degree
 * rotation and REMAPPED otherwise.
 *
 * Side effects: rewrites plane_state->view, translates (and possibly
 * rotates) plane_state->uapi.src to be viewport-relative, and fills in
 * plane_state->color_plane[].
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	/* Source viewport in whole pixels (src is 16.16 fixed point). */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* CCS modifiers are not expected to take the remap path. */
	drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->uapi.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		/* Chroma planes (i != 0) are subsampled by hsub x vsub. */
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		/* Tile-aligned offset within the normal view, in tiles. */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		/* Planes are packed back to back in the remapped view. */
		gtt_offset += info->plane[i].width * info->plane[i].height;

		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}
3249
/*
 * Compute the GGTT view and per-color-plane stride/x/y offsets for
 * @plane_state.  Takes the remap path (intel_plane_remap_gtt()) when the
 * plane needs it; otherwise uses the precomputed normal/rotated
 * coordinates cached in the intel_framebuffer.
 *
 * Returns 0 on success (including the no-fb case), or the error from
 * intel_plane_check_stride() when the resulting stride is unusable.
 */
static int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
	const struct intel_framebuffer *fb =
		to_intel_framebuffer(plane_state->hw.fb);
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes;

	/* No fb: nothing to map; plane is effectively disabled. */
	if (!fb)
		return 0;

	num_planes = fb->base.format->num_planes;

	if (intel_plane_needs_remap(plane_state)) {
		intel_plane_remap_gtt(plane_state);

		/*
		 * Sometimes even remapping can't overcome
		 * the stride limitations :( Can happen with
		 * big plane sizes and suitably misaligned
		 * offsets.
		 */
		return intel_plane_check_stride(plane_state);
	}

	intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

	/* Use the coordinates precomputed by intel_fill_fb_info(). */
	for (i = 0; i < num_planes; i++) {
		plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
		plane_state->color_plane[i].offset = 0;

		if (drm_rotation_90_or_270(rotation)) {
			plane_state->color_plane[i].x = fb->rotated[i].x;
			plane_state->color_plane[i].y = fb->rotated[i].y;
		} else {
			plane_state->color_plane[i].x = fb->normal[i].x;
			plane_state->color_plane[i].y = fb->normal[i].y;
		}
	}

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				fb->base.width << 16, fb->base.height << 16,
				DRM_MODE_ROTATE_270);

	return intel_plane_check_stride(plane_state);
}
3298
3299 static int i9xx_format_to_fourcc(int format)
3300 {
3301         switch (format) {
3302         case DISPPLANE_8BPP:
3303                 return DRM_FORMAT_C8;
3304         case DISPPLANE_BGRA555:
3305                 return DRM_FORMAT_ARGB1555;
3306         case DISPPLANE_BGRX555:
3307                 return DRM_FORMAT_XRGB1555;
3308         case DISPPLANE_BGRX565:
3309                 return DRM_FORMAT_RGB565;
3310         default:
3311         case DISPPLANE_BGRX888:
3312                 return DRM_FORMAT_XRGB8888;
3313         case DISPPLANE_RGBX888:
3314                 return DRM_FORMAT_XBGR8888;
3315         case DISPPLANE_BGRA888:
3316                 return DRM_FORMAT_ARGB8888;
3317         case DISPPLANE_RGBA888:
3318                 return DRM_FORMAT_ABGR8888;
3319         case DISPPLANE_BGRX101010:
3320                 return DRM_FORMAT_XRGB2101010;
3321         case DISPPLANE_RGBX101010:
3322                 return DRM_FORMAT_XBGR2101010;
3323         case DISPPLANE_BGRA101010:
3324                 return DRM_FORMAT_ARGB2101010;
3325         case DISPPLANE_RGBA101010:
3326                 return DRM_FORMAT_ABGR2101010;
3327         case DISPPLANE_RGBX161616:
3328                 return DRM_FORMAT_XBGR16161616F;
3329         }
3330 }
3331
3332 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
3333 {
3334         switch (format) {
3335         case PLANE_CTL_FORMAT_RGB_565:
3336                 return DRM_FORMAT_RGB565;
3337         case PLANE_CTL_FORMAT_NV12:
3338                 return DRM_FORMAT_NV12;
3339         case PLANE_CTL_FORMAT_P010:
3340                 return DRM_FORMAT_P010;
3341         case PLANE_CTL_FORMAT_P012:
3342                 return DRM_FORMAT_P012;
3343         case PLANE_CTL_FORMAT_P016:
3344                 return DRM_FORMAT_P016;
3345         case PLANE_CTL_FORMAT_Y210:
3346                 return DRM_FORMAT_Y210;
3347         case PLANE_CTL_FORMAT_Y212:
3348                 return DRM_FORMAT_Y212;
3349         case PLANE_CTL_FORMAT_Y216:
3350                 return DRM_FORMAT_Y216;
3351         case PLANE_CTL_FORMAT_Y410:
3352                 return DRM_FORMAT_XVYU2101010;
3353         case PLANE_CTL_FORMAT_Y412:
3354                 return DRM_FORMAT_XVYU12_16161616;
3355         case PLANE_CTL_FORMAT_Y416:
3356                 return DRM_FORMAT_XVYU16161616;
3357         default:
3358         case PLANE_CTL_FORMAT_XRGB_8888:
3359                 if (rgb_order) {
3360                         if (alpha)
3361                                 return DRM_FORMAT_ABGR8888;
3362                         else
3363                                 return DRM_FORMAT_XBGR8888;
3364                 } else {
3365                         if (alpha)
3366                                 return DRM_FORMAT_ARGB8888;
3367                         else
3368                                 return DRM_FORMAT_XRGB8888;
3369                 }
3370         case PLANE_CTL_FORMAT_XRGB_2101010:
3371                 if (rgb_order) {
3372                         if (alpha)
3373                                 return DRM_FORMAT_ABGR2101010;
3374                         else
3375                                 return DRM_FORMAT_XBGR2101010;
3376                 } else {
3377                         if (alpha)
3378                                 return DRM_FORMAT_ARGB2101010;
3379                         else
3380                                 return DRM_FORMAT_XRGB2101010;
3381                 }
3382         case PLANE_CTL_FORMAT_XRGB_16161616F:
3383                 if (rgb_order) {
3384                         if (alpha)
3385                                 return DRM_FORMAT_ABGR16161616F;
3386                         else
3387                                 return DRM_FORMAT_XBGR16161616F;
3388                 } else {
3389                         if (alpha)
3390                                 return DRM_FORMAT_ARGB16161616F;
3391                         else
3392                                 return DRM_FORMAT_XRGB16161616F;
3393                 }
3394         }
3395 }
3396
/*
 * Try to wrap the BIOS/GOP-programmed scanout buffer (described by
 * @plane_config) in a GEM object backed by preallocated stolen memory,
 * and pin it into the GGTT at its existing offset so scanout continues
 * uninterrupted.
 *
 * Returns the pinned vma on success, or NULL on any failure (callers
 * then fall back to sharing another CRTC's fb or disabling the plane).
 */
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
		  struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 base, size;

	if (plane_config->size == 0)
		return NULL;

	/* Expand base/size to GTT minimum alignment boundaries. */
	base = round_down(plane_config->base,
			  I915_GTT_MIN_ALIGNMENT);
	size = round_up(plane_config->base + plane_config->size,
			I915_GTT_MIN_ALIGNMENT);
	size -= base;

	/*
	 * If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features.
	 */
	if (size * 2 > i915->stolen_usable_size)
		return NULL;

	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
	if (IS_ERR(obj))
		return NULL;

	/* Mirror the BIOS tiling setup onto the new object. */
	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		obj->tiling_and_stride =
			plane_config->fb->base.pitches[0] |
			plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err_obj;

	/* Pin at the exact GGTT offset the BIOS is already scanning out. */
	if (i915_ggtt_pin(vma, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
		goto err_obj;

	/* Tiled scanout needs a fenceable mapping. */
	if (i915_gem_object_is_tiled(obj) &&
	    !i915_vma_is_map_and_fenceable(vma))
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return NULL;
}
3457
/*
 * Reconstruct an intel_framebuffer around the BIOS-programmed scanout
 * buffer for @crtc.  Only linear/X/Y tiling is supported; on success
 * the pinned vma is stored in plane_config->vma.
 *
 * Returns true on success, false if the modifier is unsupported or the
 * buffer could not be wrapped/pinned.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	struct i915_vma *vma;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		drm_dbg(&dev_priv->drm,
			"Unsupported modifier for initial FB: 0x%llx\n",
			fb->modifier);
		return false;
	}

	vma = initial_plane_vma(dev_priv, plane_config);
	if (!vma)
		return false;

	/* Rebuild the fb creation request from the readout state. */
	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb),
				   vma->obj, &mode_cmd)) {
		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
		goto err_vma;
	}

	plane_config->vma = vma;
	return true;

err_vma:
	i915_vma_put(vma);
	return false;
}
3504
3505 static void
3506 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3507                         struct intel_plane_state *plane_state,
3508                         bool visible)
3509 {
3510         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3511
3512         plane_state->uapi.visible = visible;
3513
3514         if (visible)
3515                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
3516         else
3517                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
3518 }
3519
/*
 * Recompute crtc_state->active_planes from the uapi plane_mask after a
 * plane has been added to or removed from the crtc.
 */
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask)
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}
3536
/*
 * Forcibly disable @plane on @crtc outside of a full atomic commit,
 * updating the current crtc/plane state in place.  Used during early
 * takeover/sanitization when the hardware state can't be expressed as a
 * proper atomic transaction.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	/* Scrub the plane out of the software state first. */
	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* IPS depends on the primary plane being enabled. */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
}
3581
3582 static struct intel_frontbuffer *
3583 to_intel_frontbuffer(struct drm_framebuffer *fb)
3584 {
3585         return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
3586 }
3587
/*
 * Hook up the BIOS-programmed framebuffer to @intel_crtc's primary
 * plane so the boot image survives driver takeover.  First tries to
 * wrap the BIOS buffer in a new fb; failing that, looks for another
 * active CRTC already scanning out from the same GGTT address and
 * shares its fb; as a last resort the primary plane is disabled.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;
	struct i915_vma *vma;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		vma = plane_config->vma;
		goto valid_fb;
	}

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same GGTT address means both CRTCs scan the same buffer. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			vma = state->vma;
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_state->hw.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->hw.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->hw.rotation);

	/* Take our own pin + reference on the (possibly shared) vma. */
	__i915_vma_pin(vma);
	intel_state->vma = i915_vma_get(vma);
	if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
		if (vma->fence)
			intel_state->flags |= PLANE_HAS_FENCE;

	/* Full-fb src/dst rectangles (src is 16.16 fixed point). */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->uapi.src = drm_plane_state_src(plane_state);
	intel_state->uapi.dst = drm_plane_state_dest(plane_state);

	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	drm_framebuffer_get(fb);

	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	/* Mark the plane's frontbuffer bit as busy for tracking. */
	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}
3687
3688 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3689                                int color_plane,
3690                                unsigned int rotation)
3691 {
3692         int cpp = fb->format->cpp[color_plane];
3693
3694         switch (fb->modifier) {
3695         case DRM_FORMAT_MOD_LINEAR:
3696         case I915_FORMAT_MOD_X_TILED:
3697                 /*
3698                  * Validated limit is 4k, but has 5k should
3699                  * work apart from the following features:
3700                  * - Ytile (already limited to 4k)
3701                  * - FP16 (already limited to 4k)
3702                  * - render compression (already limited to 4k)
3703                  * - KVMR sprite and cursor (don't care)
3704                  * - horizontal panning (TODO verify this)
3705                  * - pipe and plane scaling (TODO verify this)
3706                  */
3707                 if (cpp == 8)
3708                         return 4096;
3709                 else
3710                         return 5120;
3711         case I915_FORMAT_MOD_Y_TILED_CCS:
3712         case I915_FORMAT_MOD_Yf_TILED_CCS:
3713         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
3714                 /* FIXME AUX plane? */
3715         case I915_FORMAT_MOD_Y_TILED:
3716         case I915_FORMAT_MOD_Yf_TILED:
3717                 if (cpp == 8)
3718                         return 2048;
3719                 else
3720                         return 4096;
3721         default:
3722                 MISSING_CASE(fb->modifier);
3723                 return 2048;
3724         }
3725 }
3726
3727 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3728                                int color_plane,
3729                                unsigned int rotation)
3730 {
3731         int cpp = fb->format->cpp[color_plane];
3732
3733         switch (fb->modifier) {
3734         case DRM_FORMAT_MOD_LINEAR:
3735         case I915_FORMAT_MOD_X_TILED:
3736                 if (cpp == 8)
3737                         return 4096;
3738                 else
3739                         return 5120;
3740         case I915_FORMAT_MOD_Y_TILED_CCS:
3741         case I915_FORMAT_MOD_Yf_TILED_CCS:
3742                 /* FIXME AUX plane? */
3743         case I915_FORMAT_MOD_Y_TILED:
3744         case I915_FORMAT_MOD_Yf_TILED:
3745                 if (cpp == 8)
3746                         return 2048;
3747                 else
3748                         return 5120;
3749         default:
3750                 MISSING_CASE(fb->modifier);
3751                 return 2048;
3752         }
3753 }
3754
/*
 * Maximum source width (in pixels) for an ICL+ plane.  A single 5k
 * limit applies; the fb layout arguments are currently unused.
 */
static int icl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	const int max_width = 5120;

	return max_width;
}
3761
/* Maximum source height (in pixels) for a SKL-era plane. */
static int skl_max_plane_height(void)
{
	const int max_height = 4096;

	return max_height;
}
3766
/* Maximum source height (in pixels) for an ICL+ plane. */
static int icl_max_plane_height(void)
{
	const int max_height = 4320;

	return max_height;
}
3771
/*
 * Walk the CCS (aux) plane's aligned offset backwards, one alignment
 * step at a time, trying to make the aux x/y coordinates line up with
 * the main surface's (@main_x, @main_y) at @main_offset.
 *
 * On success, stores the adjusted aux offset/x/y back into
 * plane_state->color_plane[ccs_plane] and returns true; returns false
 * if no offset yields matching coordinates.
 */
static bool
skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
			       int main_x, int main_y, u32 main_offset,
			       int ccs_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int aux_x = plane_state->color_plane[ccs_plane].x;
	int aux_y = plane_state->color_plane[ccs_plane].y;
	u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
	u32 alignment = intel_surf_alignment(fb, ccs_plane);
	int hsub;
	int vsub;

	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
	/*
	 * Step the aux offset down by one alignment unit per iteration
	 * until the (subsampled) aux coordinates match the main surface,
	 * or until we run out of room (offset below main, or y overshoot).
	 */
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
							       plane_state,
							       ccs_plane,
							       aux_offset,
							       aux_offset -
								alignment);
		/* Convert back from subsampled to full-resolution coords. */
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[ccs_plane].offset = aux_offset;
	plane_state->color_plane[ccs_plane].x = aux_x;
	plane_state->color_plane[ccs_plane].y = aux_y;

	return true;
}
3816
/*
 * Compute the final surface offset and x/y coordinates for the main
 * (color) plane of a skl+ fb, honouring the per-platform size limits,
 * the X-tiling stride constraint, and the CCS coordinate-matching
 * requirement. On success the results are stored in color_plane[0]
 * and the uapi src rect is translated to the final coordinates.
 *
 * Returns 0 on success, -EINVAL if the source is too big or no
 * suitable offset can be found.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int x = plane_state->uapi.src.x1 >> 16;
	int y = plane_state->uapi.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->uapi.src) >> 16;
	int h = drm_rect_height(&plane_state->uapi.src) >> 16;
	int max_width;
	int max_height;
	u32 alignment;
	u32 offset;
	int aux_plane = intel_main_to_aux_plane(fb, 0);
	/*
	 * NOTE(review): when the fb has no AUX plane this presumably reads
	 * color_plane[0].offset (aux_plane == 0?) -- confirm the contract
	 * of intel_main_to_aux_plane() for non-CCS/non-semiplanar fbs.
	 */
	u32 aux_offset = plane_state->color_plane[aux_plane].offset;

	/* Max plane dimensions differ per platform generation. */
	if (INTEL_GEN(dev_priv) >= 11)
		max_width = icl_max_plane_width(fb, 0, rotation);
	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		max_width = glk_max_plane_width(fb, 0, rotation);
	else
		max_width = skl_max_plane_width(fb, 0, rotation);

	if (INTEL_GEN(dev_priv) >= 11)
		max_height = icl_max_plane_height();
	else
		max_height = skl_max_plane_height();

	if (w > max_width || h > max_height) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			    w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);
	/* The ~(alignment - 1) masking below requires a power-of-2 alignment. */
	if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
		return -EINVAL;

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Walk the offset back one alignment step at a time. */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				drm_dbg_kms(&dev_priv->drm,
					    "Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, aux_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[aux_plane].x ||
		    y != plane_state->color_plane[aux_plane].y) {
			drm_dbg_kms(&dev_priv->drm,
				    "Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      x << 16, y << 16);

	return 0;
}
3923
/*
 * Compute the surface offset and x/y for the UV (chroma) plane of a
 * YUV semiplanar fb. Coordinates are shifted by 17: 16 bits of fixed
 * point plus one more for the 2x chroma subsampling. With a CCS
 * modifier the UV offset must additionally line up with its CCS
 * plane's coordinates (checked the same way as the main surface).
 *
 * Returns 0 on success, -EINVAL on failure.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int uv_plane = 1;
	int max_width = skl_max_plane_width(fb, uv_plane, rotation);
	int max_height = 4096;
	int x = plane_state->uapi.src.x1 >> 17;
	int y = plane_state->uapi.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->uapi.src) >> 17;
	int h = drm_rect_height(&plane_state->uapi.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
	offset = intel_plane_compute_aligned_offset(&x, &y,
						    plane_state, uv_plane);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		drm_dbg_kms(&i915->drm,
			    "CbCr source size %dx%d too big (limit %dx%d)\n",
			    w, h, max_width, max_height);
		return -EINVAL;
	}

	if (is_ccs_modifier(fb->modifier)) {
		int ccs_plane = main_to_ccs_plane(fb, uv_plane);
		int aux_offset = plane_state->color_plane[ccs_plane].offset;
		int alignment = intel_surf_alignment(fb, uv_plane);

		/* CCS offset is relative to the UV offset; keep it non-negative. */
		if (offset > aux_offset)
			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset,
								   aux_offset & ~(alignment - 1));

		/* Step the offset back until UV x/y matches the CCS plane's x/y. */
		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, ccs_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[ccs_plane].x ||
		    y != plane_state->color_plane[ccs_plane].y) {
			drm_dbg_kms(&i915->drm,
				    "Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[uv_plane].offset = offset;
	plane_state->color_plane[uv_plane].x = x;
	plane_state->color_plane[uv_plane].y = y;

	return 0;
}
3987
/*
 * Compute the offset and x/y for every CCS (compression control) plane
 * of the fb. CCS planes are subsampled relative to their main plane,
 * so the source coordinates are divided by the combined (main * ccs)
 * subsampling factors before computing the aligned offset, and the
 * remainders are folded back in afterwards.
 *
 * Always returns 0.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int src_x = plane_state->uapi.src.x1 >> 16;
	int src_y = plane_state->uapi.src.y1 >> 16;
	u32 offset;
	int ccs_plane;

	for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
		int main_hsub, main_vsub;
		int hsub, vsub;
		int x, y;

		/* Only the CCS planes of the fb are handled here. */
		if (!is_ccs_plane(fb, ccs_plane))
			continue;

		intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
					       ccs_to_main_plane(fb, ccs_plane));
		intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

		/* Total subsampling = main plane's factor * CCS plane's factor. */
		hsub *= main_hsub;
		vsub *= main_vsub;
		x = src_x / hsub;
		y = src_y / vsub;

		intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);

		offset = intel_plane_compute_aligned_offset(&x, &y,
							    plane_state,
							    ccs_plane);

		plane_state->color_plane[ccs_plane].offset = offset;
		/* Re-add the sub-tile remainder, scaled back to main plane units. */
		plane_state->color_plane[ccs_plane].x = (x * hsub +
							 src_x % hsub) /
							main_hsub;
		plane_state->color_plane[ccs_plane].y = (y * vsub +
							 src_y % vsub) /
							main_vsub;
	}

	return 0;
}
4030
/*
 * Compute the final color_plane[] offsets and x/y coordinates for all
 * surfaces of a skl+ plane. The AUX surfaces (CCS and/or UV) are set
 * up first because the main surface offset is constrained by them.
 *
 * Returns 0 on success or a negative error code.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;
	bool needs_aux = false;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* Nothing to compute for an invisible plane. */
	if (!plane_state->uapi.visible)
		return 0;

	/*
	 * Handle the AUX surface first since the main surface setup depends on
	 * it.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		needs_aux = true;
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	}

	if (intel_format_info_is_yuv_semiplanar(fb->format,
						fb->modifier)) {
		needs_aux = true;
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	}

	if (!needs_aux) {
		int i;

		/*
		 * No AUX surfaces in use: poison the remaining planes.
		 * NOTE(review): ~0xfff is presumably chosen so it can never
		 * match a real aligned offset -- confirm against the code
		 * that consumes these offsets.
		 */
		for (i = 1; i < fb->format->num_planes; i++) {
			plane_state->color_plane[i].offset = ~0xfff;
			plane_state->color_plane[i].x = 0;
			plane_state->color_plane[i].y = 0;
		}
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
4079
4080 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
4081                              const struct intel_plane_state *plane_state,
4082                              unsigned int *num, unsigned int *den)
4083 {
4084         const struct drm_framebuffer *fb = plane_state->hw.fb;
4085         unsigned int cpp = fb->format->cpp[0];
4086
4087         /*
4088          * g4x bspec says 64bpp pixel rate can't exceed 80%
4089          * of cdclk when the sprite plane is enabled on the
4090          * same pipe. ilk/snb bspec says 64bpp pixel rate is
4091          * never allowed to exceed 80% of cdclk. Let's just go
4092          * with the ilk/snb limit always.
4093          */
4094         if (cpp == 8) {
4095                 *num = 10;
4096                 *den = 8;
4097         } else {
4098                 *num = 1;
4099                 *den = 1;
4100         }
4101 }
4102
4103 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
4104                                 const struct intel_plane_state *plane_state)
4105 {
4106         unsigned int pixel_rate;
4107         unsigned int num, den;
4108
4109         /*
4110          * Note that crtc_state->pixel_rate accounts for both
4111          * horizontal and vertical panel fitter downscaling factors.
4112          * Pre-HSW bspec tells us to only consider the horizontal
4113          * downscaling factor here. We ignore that and just consider
4114          * both for simplicity.
4115          */
4116         pixel_rate = crtc_state->pixel_rate;
4117
4118         i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
4119
4120         /* two pixels per clock with double wide pipe */
4121         if (crtc_state->double_wide)
4122                 den *= 2;
4123
4124         return DIV_ROUND_UP(pixel_rate * num, den);
4125 }
4126
4127 unsigned int
4128 i9xx_plane_max_stride(struct intel_plane *plane,
4129                       u32 pixel_format, u64 modifier,
4130                       unsigned int rotation)
4131 {
4132         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4133
4134         if (!HAS_GMCH(dev_priv)) {
4135                 return 32*1024;
4136         } else if (INTEL_GEN(dev_priv) >= 4) {
4137                 if (modifier == I915_FORMAT_MOD_X_TILED)
4138                         return 16*1024;
4139                 else
4140                         return 32*1024;
4141         } else if (INTEL_GEN(dev_priv) >= 3) {
4142                 if (modifier == I915_FORMAT_MOD_X_TILED)
4143                         return 8*1024;
4144                 else
4145                         return 16*1024;
4146         } else {
4147                 if (plane->i9xx_plane == PLANE_C)
4148                         return 4*1024;
4149                 else
4150                         return 8*1024;
4151         }
4152 }
4153
4154 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4155 {
4156         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4157         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4158         u32 dspcntr = 0;
4159
4160         if (crtc_state->gamma_enable)
4161                 dspcntr |= DISPPLANE_GAMMA_ENABLE;
4162
4163         if (crtc_state->csc_enable)
4164                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
4165
4166         if (INTEL_GEN(dev_priv) < 5)
4167                 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
4168
4169         return dspcntr;
4170 }
4171
/*
 * Build the plane-state-dependent bits of DSPCNTR: enable bit, pixel
 * format, tiling and rotation/mirror. The crtc-dependent bits are
 * added separately by i9xx_plane_ctl_crtc().
 *
 * Returns 0 (no enable bit set) for an unsupported pixel format.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	/* NOTE(review): trickle feed disabled on these platforms; rationale not visible here. */
	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* Map the DRM fourcc to the hardware format bits. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRA555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRA888;
		break;
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBA888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRA101010;
		break;
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBA101010;
		break;
	case DRM_FORMAT_XBGR16161616F:
		dspcntr |= DISPPLANE_RGBX161616;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
4244
4245 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
4246 {
4247         struct drm_i915_private *dev_priv =
4248                 to_i915(plane_state->uapi.plane->dev);
4249         const struct drm_framebuffer *fb = plane_state->hw.fb;
4250         int src_x, src_y, src_w;
4251         u32 offset;
4252         int ret;
4253
4254         ret = intel_plane_compute_gtt(plane_state);
4255         if (ret)
4256                 return ret;
4257
4258         if (!plane_state->uapi.visible)
4259                 return 0;
4260
4261         src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
4262         src_x = plane_state->uapi.src.x1 >> 16;
4263         src_y = plane_state->uapi.src.y1 >> 16;
4264
4265         /* Undocumented hardware limit on i965/g4x/vlv/chv */
4266         if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
4267                 return -EINVAL;
4268
4269         intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
4270
4271         if (INTEL_GEN(dev_priv) >= 4)
4272                 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
4273                                                             plane_state, 0);
4274         else
4275                 offset = 0;
4276
4277         /*
4278          * Put the final coordinates back so that the src
4279          * coordinate checks will see the right values.
4280          */
4281         drm_rect_translate_to(&plane_state->uapi.src,
4282                               src_x << 16, src_y << 16);
4283
4284         /* HSW/BDW do this automagically in hardware */
4285         if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
4286                 unsigned int rotation = plane_state->hw.rotation;
4287                 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
4288                 int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
4289
4290                 if (rotation & DRM_MODE_ROTATE_180) {
4291                         src_x += src_w - 1;
4292                         src_y += src_h - 1;
4293                 } else if (rotation & DRM_MODE_REFLECT_X) {
4294                         src_x += src_w - 1;
4295                 }
4296         }
4297
4298         plane_state->color_plane[0].offset = offset;
4299         plane_state->color_plane[0].x = src_x;
4300         plane_state->color_plane[0].y = src_y;
4301
4302         return 0;
4303 }
4304
4305 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
4306 {
4307         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4308         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4309
4310         if (IS_CHERRYVIEW(dev_priv))
4311                 return i9xx_plane == PLANE_B;
4312         else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
4313                 return false;
4314         else if (IS_GEN(dev_priv, 4))
4315                 return i9xx_plane == PLANE_C;
4316         else
4317                 return i9xx_plane == PLANE_B ||
4318                         i9xx_plane == PLANE_C;
4319 }
4320
/*
 * Atomic check for pre-skl primary planes: validate rotation, clip the
 * plane against the crtc, compute the surface offset, and precompute
 * the DSPCNTR value used at commit time.
 *
 * Returns 0 on success or a negative error code.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	int ret;

	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	/* No scaling on these planes; windowing only where the hw has it. */
	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  i9xx_plane_has_windowing(plane),
						  true);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	/* Fully clipped plane: nothing more to validate or precompute. */
	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}
4356
/*
 * Program a pre-skl primary plane from the precomputed plane state.
 * All register writes happen under the uncore lock using the _fw
 * accessors, and the surface register is written last so the update
 * latches as a unit (see the comment below about self-arming).
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->uapi.dst.x1;
	int crtc_y = plane_state->uapi.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	/* Combine the precomputed plane bits with the crtc-dependent bits. */
	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* Pre-gen4 has no surface offset register; use the linear offset. */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
			  plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * PLANE_A doesn't actually have a full window
		 * generator but let's assume we still need to
		 * program whatever is there.
		 */
		intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
				  (crtc_y << 16) | crtc_x);
		intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
				  ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary plane has its own windowing registers. */
		intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
				  (crtc_y << 16) | crtc_x);
		intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
				  ((crtc_h - 1) << 16) | (crtc_w - 1));
		intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
				  (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
				  linear_offset);
		intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
				  (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
	else
		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4431
/*
 * Disable a pre-skl primary plane while still programming the
 * crtc-dependent DSPCNTR bits (see the comment below for why those
 * matter even on a disabled plane).
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* No DISPLAY_PLANE_ENABLE bit; write the surface last as usual. */
	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
	else
		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4462
/*
 * Read the plane's enable state back from the hardware and report
 * which pipe it is currently attached to.
 *
 * Returns false (plane off) when the power domain is not enabled,
 * otherwise the DISPLAY_PLANE_ENABLE bit from DSPCNTR. *pipe is set
 * from DSPCNTR on pre-ilk (where planes can move between pipes) and
 * from the fixed plane->pipe mapping on ilk+.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
4497
4498 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
4499 {
4500         struct drm_device *dev = intel_crtc->base.dev;
4501         struct drm_i915_private *dev_priv = to_i915(dev);
4502         unsigned long irqflags;
4503
4504         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
4505
4506         intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
4507         intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
4508         intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
4509
4510         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
4511 }
4512
4513 /*
4514  * This function detaches (aka. unbinds) unused scalers in hardware
4515  */
4516 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4517 {
4518         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4519         const struct intel_crtc_scaler_state *scaler_state =
4520                 &crtc_state->scaler_state;
4521         int i;
4522
4523         /* loop through and disable scalers that aren't in use */
4524         for (i = 0; i < intel_crtc->num_scalers; i++) {
4525                 if (!scaler_state->scalers[i].in_use)
4526                         skl_detach_scaler(intel_crtc, i);
4527         }
4528 }
4529
/*
 * Return the divisor converting a byte stride into the units the
 * hardware expects: 64 byte chunks for linear surfaces, tiles
 * (height or width depending on rotation) for tiled surfaces.
 */
static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
					  int color_plane, unsigned int rotation)
{
	if (is_surface_linear(fb, color_plane))
		return 64;

	if (drm_rotation_90_or_270(rotation))
		return intel_tile_height(fb, color_plane);

	return intel_tile_width_bytes(fb, color_plane);
}
4544
4545 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4546                      int color_plane)
4547 {
4548         const struct drm_framebuffer *fb = plane_state->hw.fb;
4549         unsigned int rotation = plane_state->hw.rotation;
4550         u32 stride = plane_state->color_plane[color_plane].stride;
4551
4552         if (color_plane >= fb->format->num_planes)
4553                 return 0;
4554
4555         return stride / skl_plane_stride_mult(fb, color_plane, rotation);
4556 }
4557
/*
 * Translate a DRM fourcc into the PLANE_CTL format (and RGB byte
 * order / YUV component order) bits for skl+ universal planes.
 *
 * Returns 0 for an unknown format (after logging a MISSING_CASE).
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
4617
4618 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4619 {
4620         if (!plane_state->hw.fb->format->has_alpha)
4621                 return PLANE_CTL_ALPHA_DISABLE;
4622
4623         switch (plane_state->hw.pixel_blend_mode) {
4624         case DRM_MODE_BLEND_PIXEL_NONE:
4625                 return PLANE_CTL_ALPHA_DISABLE;
4626         case DRM_MODE_BLEND_PREMULTI:
4627                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4628         case DRM_MODE_BLEND_COVERAGE:
4629                 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4630         default:
4631                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4632                 return PLANE_CTL_ALPHA_DISABLE;
4633         }
4634 }
4635
4636 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4637 {
4638         if (!plane_state->hw.fb->format->has_alpha)
4639                 return PLANE_COLOR_ALPHA_DISABLE;
4640
4641         switch (plane_state->hw.pixel_blend_mode) {
4642         case DRM_MODE_BLEND_PIXEL_NONE:
4643                 return PLANE_COLOR_ALPHA_DISABLE;
4644         case DRM_MODE_BLEND_PREMULTI:
4645                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4646         case DRM_MODE_BLEND_COVERAGE:
4647                 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4648         default:
4649                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4650                 return PLANE_COLOR_ALPHA_DISABLE;
4651         }
4652 }
4653
/*
 * Translate a framebuffer modifier into the PLANE_CTL tiling (and
 * decompression) bits. Linear surfaces need no tiling bits; CCS
 * modifiers additionally enable render or media decompression.
 * Unknown modifiers are flagged via MISSING_CASE() and encoded as
 * linear (0).
 */
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		/* Gen12 render compression here runs without fast clear. */
		return PLANE_CTL_TILED_Y |
		       PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
		       PLANE_CTL_CLEAR_COLOR_DISABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
4681
4682 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4683 {
4684         switch (rotate) {
4685         case DRM_MODE_ROTATE_0:
4686                 break;
4687         /*
4688          * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
4689          * while i915 HW rotation is clockwise, thats why this swapping.
4690          */
4691         case DRM_MODE_ROTATE_90:
4692                 return PLANE_CTL_ROTATE_270;
4693         case DRM_MODE_ROTATE_180:
4694                 return PLANE_CTL_ROTATE_180;
4695         case DRM_MODE_ROTATE_270:
4696                 return PLANE_CTL_ROTATE_90;
4697         default:
4698                 MISSING_CASE(rotate);
4699         }
4700
4701         return 0;
4702 }
4703
4704 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4705 {
4706         switch (reflect) {
4707         case 0:
4708                 break;
4709         case DRM_MODE_REFLECT_X:
4710                 return PLANE_CTL_FLIP_HORIZONTAL;
4711         case DRM_MODE_REFLECT_Y:
4712         default:
4713                 MISSING_CASE(reflect);
4714         }
4715
4716         return 0;
4717 }
4718
4719 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4720 {
4721         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4722         u32 plane_ctl = 0;
4723
4724         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4725                 return plane_ctl;
4726
4727         if (crtc_state->gamma_enable)
4728                 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4729
4730         if (crtc_state->csc_enable)
4731                 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4732
4733         return plane_ctl;
4734 }
4735
4736 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
4737                   const struct intel_plane_state *plane_state)
4738 {
4739         struct drm_i915_private *dev_priv =
4740                 to_i915(plane_state->uapi.plane->dev);
4741         const struct drm_framebuffer *fb = plane_state->hw.fb;
4742         unsigned int rotation = plane_state->hw.rotation;
4743         const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
4744         u32 plane_ctl;
4745
4746         plane_ctl = PLANE_CTL_ENABLE;
4747
4748         if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
4749                 plane_ctl |= skl_plane_ctl_alpha(plane_state);
4750                 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
4751
4752                 if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
4753                         plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
4754
4755                 if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4756                         plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
4757         }
4758
4759         plane_ctl |= skl_plane_ctl_format(fb->format->format);
4760         plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
4761         plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
4762
4763         if (INTEL_GEN(dev_priv) >= 10)
4764                 plane_ctl |= cnl_plane_ctl_flip(rotation &
4765                                                 DRM_MODE_REFLECT_MASK);
4766
4767         if (key->flags & I915_SET_COLORKEY_DESTINATION)
4768                 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
4769         else if (key->flags & I915_SET_COLORKEY_SOURCE)
4770                 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
4771
4772         return plane_ctl;
4773 }
4774
4775 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4776 {
4777         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4778         u32 plane_color_ctl = 0;
4779
4780         if (INTEL_GEN(dev_priv) >= 11)
4781                 return plane_color_ctl;
4782
4783         if (crtc_state->gamma_enable)
4784                 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4785
4786         if (crtc_state->csc_enable)
4787                 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4788
4789         return plane_color_ctl;
4790 }
4791
4792 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
4793                         const struct intel_plane_state *plane_state)
4794 {
4795         struct drm_i915_private *dev_priv =
4796                 to_i915(plane_state->uapi.plane->dev);
4797         const struct drm_framebuffer *fb = plane_state->hw.fb;
4798         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
4799         u32 plane_color_ctl = 0;
4800
4801         plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
4802         plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
4803
4804         if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
4805                 if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
4806                         plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
4807                 else
4808                         plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
4809
4810                 if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4811                         plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
4812         } else if (fb->format->is_yuv) {
4813                 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
4814         }
4815
4816         return plane_color_ctl;
4817 }
4818
/*
 * __intel_display_resume - re-read the HW state and re-commit a saved state
 * @dev: drm device
 * @state: atomic state duplicated before the hardware was disturbed,
 *         or NULL to only perform the hardware state readout
 * @ctx: modeset acquire context (caller holds the required locks)
 *
 * Used when bringing the display back, e.g. after a GPU reset.
 * Returns 0 on success or a negative error code from the commit.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	/* Re-read the current hardware state, then re-enable VGA if needed. */
	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	/* -EDEADLK should not be possible with all locks already held. */
	drm_WARN_ON(dev, ret == -EDEADLK);
	return ret;
}
4857
4858 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4859 {
4860         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4861                 intel_has_gpu_reset(&dev_priv->gt));
4862 }
4863
/*
 * intel_prepare_reset - quiesce the display before a GPU reset
 * @dev_priv: i915 device
 *
 * When the pending reset clobbers the display (or the modeset reset
 * test is forced), take all modeset locks, duplicate the current atomic
 * state into modeset_restore_state and disable all crtcs. The saved
 * state is restored by intel_finish_reset(), which also releases the
 * locks acquired here.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry lock acquisition until we stop hitting -EDEADLK. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stash the duplicated state for intel_finish_reset() to restore. */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
4923
/*
 * intel_finish_reset - restore the display after a GPU reset
 * @dev_priv: i915 device
 *
 * Counterpart to intel_prepare_reset(): re-commits the atomic state
 * saved in modeset_restore_state, doing a full display hardware
 * re-initialization first when the reset clobbered the display, then
 * drops the modeset locks and clears the I915_RESET_MODESET flag.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		/* Re-arm hotplug interrupts under the irq lock. */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	/* Locks were acquired in intel_prepare_reset(). */
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
4976
4977 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4978 {
4979         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4980         enum pipe pipe = crtc->pipe;
4981         u32 tmp;
4982
4983         tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
4984
4985         /*
4986          * Display WA #1153: icl
4987          * enable hardware to bypass the alpha math
4988          * and rounding for per-pixel values 00 and 0xff
4989          */
4990         tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4991         /*
4992          * Display WA # 1605353570: icl
4993          * Set the pixel rounding bit to 1 for allowing
4994          * passthrough of Frame buffer pixels unmodified
4995          * across pipe
4996          */
4997         tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4998         intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
4999 }
5000
/*
 * Switch the FDI link from a training pattern to the normal (idle)
 * pattern after link training has completed, enabling enhanced framing
 * on both the CPU TX and PCH RX sides.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/* IVB uses a different train-pattern field layout. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_write(dev_priv, reg,
			       intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
5041
/*
 * The FDI link training functions for ILK/Ibexpeak.
 *
 * Trains pattern 1 (waiting for bit lock) and then pattern 2 (waiting
 * for symbol lock), polling FDI_RX_IIR a bounded number of times for
 * each. Failures are logged but not fatal.
 */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock to confirm training pattern 1 succeeded. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			/* NOTE(review): presumably write-1-to-clear — confirm */
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Poll for symbol lock to confirm training pattern 2 succeeded. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");

}
5137
/*
 * SNB-B vswing/pre-emphasis levels tried in order during FDI link
 * training (see gen6_fdi_link_train()).
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
5144
/*
 * The FDI link training functions for SNB/Cougarpoint.
 *
 * Trains pattern 1 (bit lock) and then pattern 2 (symbol lock),
 * stepping through the vswing/pre-emphasis levels in
 * snb_b_fdi_train_param[] and retrying the lock poll for each level.
 * Failures are logged but not fatal.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT PCH uses a different train-pattern field. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Try each vswing/pre-emphasis level until bit lock is achieved. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Try each vswing/pre-emphasis level until symbol lock is achieved. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
5281
5282 /* Manual link training for Ivy Bridge A0 parts */
5283 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
5284                                       const struct intel_crtc_state *crtc_state)
5285 {
5286         struct drm_device *dev = crtc->base.dev;
5287         struct drm_i915_private *dev_priv = to_i915(dev);
5288         enum pipe pipe = crtc->pipe;
5289         i915_reg_t reg;
5290         u32 temp, i, j;
5291
5292         /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
5293            for train result */
5294         reg = FDI_RX_IMR(pipe);
5295         temp = intel_de_read(dev_priv, reg);
5296         temp &= ~FDI_RX_SYMBOL_LOCK;
5297         temp &= ~FDI_RX_BIT_LOCK;
5298         intel_de_write(dev_priv, reg, temp);
5299
5300         intel_de_posting_read(dev_priv, reg);
5301         udelay(150);
5302
5303         drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
5304                     intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
5305
5306         /* Try each vswing and preemphasis setting twice before moving on */
5307         for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
5308                 /* disable first in case we need to retry */
5309                 reg = FDI_TX_CTL(pipe);
5310                 temp = intel_de_read(dev_priv, reg);
5311                 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
5312                 temp &= ~FDI_TX_ENABLE;
5313                 intel_de_write(dev_priv, reg, temp);
5314
5315                 reg = FDI_RX_CTL(pipe);
5316                 temp = intel_de_read(dev_priv, reg);
5317                 temp &= ~FDI_LINK_TRAIN_AUTO;
5318                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5319                 temp &= ~FDI_RX_ENABLE;
5320                 intel_de_write(dev_priv, reg, temp);
5321
5322                 /* enable CPU FDI TX and PCH FDI RX */
5323                 reg = FDI_TX_CTL(pipe);
5324                 temp = intel_de_read(dev_priv, reg);
5325                 temp &= ~FDI_DP_PORT_WIDTH_MASK;
5326                 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
5327                 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
5328                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
5329                 temp |= snb_b_fdi_train_param[j/2];
5330                 temp |= FDI_COMPOSITE_SYNC;
5331                 intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
5332
5333                 intel_de_write(dev_priv, FDI_RX_MISC(pipe),
5334                                FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
5335
5336                 reg = FDI_RX_CTL(pipe);
5337                 temp = intel_de_read(dev_priv, reg);
5338                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
5339                 temp |= FDI_COMPOSITE_SYNC;
5340                 intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
5341
5342                 intel_de_posting_read(dev_priv, reg);
5343                 udelay(1); /* should be 0.5us */
5344
5345                 for (i = 0; i < 4; i++) {
5346                         reg = FDI_RX_IIR(pipe);
5347                         temp = intel_de_read(dev_priv, reg);
5348                         drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
5349
5350                         if (temp & FDI_RX_BIT_LOCK ||
5351                             (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
5352                                 intel_de_write(dev_priv, reg,
5353                                                temp | FDI_RX_BIT_LOCK);
5354                                 drm_dbg_kms(&dev_priv->drm,
5355                                             "FDI train 1 done, level %i.\n",
5356                                             i);
5357                                 break;
5358                         }
5359                         udelay(1); /* should be 0.5us */
5360                 }
5361                 if (i == 4) {
5362                         drm_dbg_kms(&dev_priv->drm,
5363                                     "FDI train 1 fail on vswing %d\n", j / 2);
5364                         continue;
5365                 }
5366
5367                 /* Train 2 */
5368                 reg = FDI_TX_CTL(pipe);
5369                 temp = intel_de_read(dev_priv, reg);
5370                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
5371                 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
5372                 intel_de_write(dev_priv, reg, temp);
5373
5374                 reg = FDI_RX_CTL(pipe);
5375                 temp = intel_de_read(dev_priv, reg);
5376                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5377                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
5378                 intel_de_write(dev_priv, reg, temp);
5379
5380                 intel_de_posting_read(dev_priv, reg);
5381                 udelay(2); /* should be 1.5us */
5382
5383                 for (i = 0; i < 4; i++) {
5384                         reg = FDI_RX_IIR(pipe);
5385                         temp = intel_de_read(dev_priv, reg);
5386                         drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
5387
5388                         if (temp & FDI_RX_SYMBOL_LOCK ||
5389                             (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
5390                                 intel_de_write(dev_priv, reg,
5391                                                temp | FDI_RX_SYMBOL_LOCK);
5392                                 drm_dbg_kms(&dev_priv->drm,
5393                                             "FDI train 2 done, level %i.\n",
5394                                             i);
5395                                 goto train_done;
5396                         }
5397                         udelay(2); /* should be 1.5us */
5398                 }
5399                 if (i == 4)
5400                         drm_dbg_kms(&dev_priv->drm,
5401                                     "FDI train 2 fail on vswing %d\n", j / 2);
5402         }
5403
5404 train_done:
5405         drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
5406 }
5407
/*
 * Enable the FDI PLLs for this pipe: first the PCH FDI RX PLL, then
 * switch the RX clock source from the raw clock to PCDclk, and finally
 * make sure the CPU FDI TX PLL is running (left always-on on Ironlake).
 * Each step is followed by a posting read plus a fixed settle delay.
 */
static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* keep the FDI RX BPC field consistent with PIPECONF's BPC */
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}
5444
/*
 * Disable the FDI PLLs for this pipe, reversing ilk_fdi_pll_enable():
 * switch the RX clock back to the raw clock, then shut down the CPU
 * FDI TX PLL and finally the PCH FDI RX PLL, waiting for the clocks
 * to settle after each PLL disable.
 */
static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
5474
/*
 * Disable the FDI link for this pipe: turn off CPU FDI TX and PCH FDI
 * RX, apply the Ironlake (IBX) clock-pointer workaround, and leave both
 * sides parked in training pattern 1 so a later re-enable starts from a
 * known link-training state.
 */
static void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	/* keep the FDI RX BPC field consistent with PIPECONF's BPC */
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	/* CPT PCHs use a different train-pattern field than IBX */
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
5526
5527 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5528 {
5529         struct drm_crtc *crtc;
5530         bool cleanup_done;
5531
5532         drm_for_each_crtc(crtc, &dev_priv->drm) {
5533                 struct drm_crtc_commit *commit;
5534                 spin_lock(&crtc->commit_lock);
5535                 commit = list_first_entry_or_null(&crtc->commit_list,
5536                                                   struct drm_crtc_commit, commit_entry);
5537                 cleanup_done = commit ?
5538                         try_wait_for_completion(&commit->cleanup_done) : true;
5539                 spin_unlock(&crtc->commit_lock);
5540
5541                 if (cleanup_done)
5542                         continue;
5543
5544                 drm_crtc_wait_one_vblank(crtc);
5545
5546                 return true;
5547         }
5548
5549         return false;
5550 }
5551
/*
 * Disable the iCLKIP pixel clock: gate the pixel clock first, then
 * disable the SSC modulator via the sideband interface (SBI), under
 * the sideband lock.
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
5566
5567 /* Program iCLKIP clock to the desired frequency */
5568 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
5569 {
5570         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5571         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5572         int clock = crtc_state->hw.adjusted_mode.crtc_clock;
5573         u32 divsel, phaseinc, auxdiv, phasedir = 0;
5574         u32 temp;
5575
5576         lpt_disable_iclkip(dev_priv);
5577
5578         /* The iCLK virtual clock root frequency is in MHz,
5579          * but the adjusted_mode->crtc_clock in in KHz. To get the
5580          * divisors, it is necessary to divide one by another, so we
5581          * convert the virtual clock precision to KHz here for higher
5582          * precision.
5583          */
5584         for (auxdiv = 0; auxdiv < 2; auxdiv++) {
5585                 u32 iclk_virtual_root_freq = 172800 * 1000;
5586                 u32 iclk_pi_range = 64;
5587                 u32 desired_divisor;
5588
5589                 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5590                                                     clock << auxdiv);
5591                 divsel = (desired_divisor / iclk_pi_range) - 2;
5592                 phaseinc = desired_divisor % iclk_pi_range;
5593
5594                 /*
5595                  * Near 20MHz is a corner case which is
5596                  * out of range for the 7-bit divisor
5597                  */
5598                 if (divsel <= 0x7f)
5599                         break;
5600         }
5601
5602         /* This should not happen with any sane values */
5603         drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
5604                     ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
5605         drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
5606                     ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
5607
5608         drm_dbg_kms(&dev_priv->drm,
5609                     "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
5610                     clock, auxdiv, divsel, phasedir, phaseinc);
5611
5612         mutex_lock(&dev_priv->sb_lock);
5613
5614         /* Program SSCDIVINTPHASE6 */
5615         temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5616         temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
5617         temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
5618         temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
5619         temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
5620         temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
5621         temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
5622         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
5623
5624         /* Program SSCAUXDIV */
5625         temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5626         temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
5627         temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
5628         intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
5629
5630         /* Enable modulator and associated divider */
5631         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5632         temp &= ~SBI_SSCCTL_DISABLE;
5633         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5634
5635         mutex_unlock(&dev_priv->sb_lock);
5636
5637         /* Wait for initialization time */
5638         udelay(24);
5639
5640         intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
5641 }
5642
/*
 * Read back the current iCLKIP configuration and convert it to a clock
 * frequency in kHz (the inverse of lpt_program_iclkip()). Returns 0 if
 * the pixel clock is gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	/* Pixel clock gated -> iCLKIP is effectively off. */
	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	/* Extract the integer divisor and phase increment fields. */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Reconstruct the effective divisor programmed by lpt_program_iclkip(). */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
5679
/*
 * Copy the CPU transcoder's timing registers (H/V total, blank, sync,
 * and vsyncshift) into the corresponding registers of the given PCH
 * transcoder.
 */
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
					   enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));

	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
5703
/*
 * Set or clear the FDI B/C lane bifurcation bit in SOUTH_CHICKEN1.
 * No-op when the bit already matches @enable. Warns if FDI RX on pipe
 * B or C is still enabled, since the bit must not change while those
 * links are active.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	/* already in the requested state? */
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}
5728
5729 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5730 {
5731         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5732         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5733
5734         switch (crtc->pipe) {
5735         case PIPE_A:
5736                 break;
5737         case PIPE_B:
5738                 if (crtc_state->fdi_lanes > 2)
5739                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
5740                 else
5741                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
5742
5743                 break;
5744         case PIPE_C:
5745                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5746
5747                 break;
5748         default:
5749                 BUG();
5750         }
5751 }
5752
5753 /*
5754  * Finds the encoder associated with the given CRTC. This can only be
5755  * used when we know that the CRTC isn't feeding multiple encoders!
5756  */
5757 static struct intel_encoder *
5758 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5759                            const struct intel_crtc_state *crtc_state)
5760 {
5761         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5762         const struct drm_connector_state *connector_state;
5763         const struct drm_connector *connector;
5764         struct intel_encoder *encoder = NULL;
5765         int num_encoders = 0;
5766         int i;
5767
5768         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5769                 if (connector_state->crtc != &crtc->base)
5770                         continue;
5771
5772                 encoder = to_intel_encoder(connector_state->best_encoder);
5773                 num_encoders++;
5774         }
5775
5776         drm_WARN(encoder->base.dev, num_encoders != 1,
5777                  "%d encoders for pipe %c\n",
5778                  num_encoders, pipe_name(crtc->pipe));
5779
5780         return encoder;
5781 }
5782
5783 /*
5784  * Enable PCH resources required for PCH ports:
5785  *   - PCH PLLs
5786  *   - FDI training & RX/TX
5787  *   - update transcoder timings
5788  *   - DP transcoding bits
5789  *   - transcoder
5790  */
static void ilk_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	/* IVB may need the FDI B/C lanes re-partitioned for this config */
	if (IS_IVYBRIDGE(dev_priv))
		ivb_update_fdi_bc_bifurcation(crtc_state);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		/* route PLL B or PLL A to this transcoder as appropriate */
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc_state);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ilk_pch_transcoder_set_timings(crtc_state, pipe);

	/* leave training patterns behind, switch to normal link operation */
	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->hw.adjusted_mode;
		u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		enum port port;

		temp = intel_de_read(dev_priv, reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		/* propagate the mode's sync polarities */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* only ports B..D can feed a CPT PCH DP transcoder */
		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
		drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
		temp |= TRANS_DP_PORT_SEL(port);

		intel_de_write(dev_priv, reg, temp);
	}

	ilk_enable_pch_transcoder(crtc_state);
}
5874
/*
 * Enable the LPT PCH transcoder: program the iCLKIP clock for the
 * mode's pixel rate, copy the CPU transcoder timings over, and turn
 * the transcoder on. LPT always uses PCH transcoder A here.
 */
void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
5890
/*
 * Post-modeset sanity check on CPT: the pipe's scanline counter
 * (PIPEDSL) should be advancing. Poll for a change (retrying once)
 * and log an error if the pipe appears stuck.
 */
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = intel_de_read(dev_priv, dslreg);
	udelay(500);
	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
			drm_err(&dev_priv->drm,
				"mode set failed: pipe %c stuck\n",
				pipe_name(pipe));
	}
}
5906
5907 /*
5908  * The hardware phase 0.0 refers to the center of the pixel.
5909  * We want to start from the top/left edge which is phase
5910  * -0.5. That matches how the hardware calculates the scaling
5911  * factors (from top-left of the first pixel to bottom-right
5912  * of the last pixel, as opposed to the pixel centers).
5913  *
5914  * For 4:2:0 subsampled chroma planes we obviously have to
5915  * adjust that so that the chroma sample position lands in
5916  * the right spot.
5917  *
5918  * Note that for packed YCbCr 4:2:2 formats there is no way to
5919  * control chroma siting. The hardware simply replicates the
5920  * chroma samples for both of the luma samples, and thus we don't
5921  * actually get the expected MPEG2 chroma siting convention :(
5922  * The same behaviour is observed on pre-SKL platforms as well.
5923  *
5924  * Theory behind the formula (note that we ignore sub-pixel
5925  * source coordinates):
5926  * s = source sample position
5927  * d = destination sample position
5928  *
5929  * Downscaling 4:1:
5930  * -0.5
5931  * | 0.0
5932  * | |     1.5 (initial phase)
5933  * | |     |
5934  * v v     v
5935  * | s | s | s | s |
5936  * |       d       |
5937  *
5938  * Upscaling 1:4:
5939  * -0.5
5940  * | -0.375 (initial phase)
5941  * | |     0.0
5942  * | |     |
5943  * v v     v
5944  * |       s       |
5945  * | d | d | d | d |
5946  */
5947 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5948 {
5949         int phase = -0x8000;
5950         u16 trip = 0;
5951
5952         if (chroma_cosited)
5953                 phase += (sub - 1) * 0x8000 / sub;
5954
5955         phase += scale / (2 * sub);
5956
5957         /*
5958          * Hardware initial phase limited to [-0.5:1.5].
5959          * Since the max hardware scale factor is 3.0, we
5960          * should never actually excdeed 1.0 here.
5961          */
5962         WARN_ON(phase < -0x8000 || phase > 0x18000);
5963
5964         if (phase < 0)
5965                 phase = 0x10000 + phase;
5966         else
5967                 trip = PS_PHASE_TRIP;
5968
5969         return ((phase >> 2) & PS_PHASE_MASK) | trip;
5970 }
5971
5972 #define SKL_MIN_SRC_W 8
5973 #define SKL_MAX_SRC_W 4096
5974 #define SKL_MIN_SRC_H 8
5975 #define SKL_MAX_SRC_H 4096
5976 #define SKL_MIN_DST_W 8
5977 #define SKL_MAX_DST_W 4096
5978 #define SKL_MIN_DST_H 8
5979 #define SKL_MAX_DST_H 4096
5980 #define ICL_MAX_SRC_W 5120
5981 #define ICL_MAX_SRC_H 4096
5982 #define ICL_MAX_DST_W 5120
5983 #define ICL_MAX_DST_H 4096
5984 #define SKL_MIN_YUV_420_SRC_W 16
5985 #define SKL_MIN_YUV_420_SRC_H 16
5986
/*
 * Core scaler assignment helper shared by the crtc and plane paths.
 * Validates the requested src/dst dimensions against the hardware
 * limits and stages the result in crtc_state->scaler_state: either
 * marks @scaler_user as needing a scaler, or stages freeing of the
 * previously assigned scaler. Returns 0 on success, -EINVAL when the
 * request cannot be satisfied.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format,
		  u64 modifier, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler bound to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			drm_dbg_kms(&dev_priv->drm,
				    "scaler_user index %u.%u: "
				    "Staged freeing scaler id %d scaler_users = 0x%x\n",
				    intel_crtc->pipe, scaler_user, *scaler_id,
				    scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* planar YUV sources have their own minimum size requirements */
	if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (INTEL_GEN(dev_priv) >= 11 &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (INTEL_GEN(dev_priv) < 11 &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
		drm_dbg_kms(&dev_priv->drm,
			    "scaler_user index %u.%u: src %ux%u dst %ux%u "
			    "size is out of scaler range\n",
			    intel_crtc->pipe, scaler_user, src_w, src_h,
			    dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
		    "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		    intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		    scaler_state->scaler_users);

	return 0;
}
6081
6082 /**
6083  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
6084  *
6085  * @state: crtc's scaler state
6086  *
6087  * Return
6088  *     0 - scaler_usage updated successfully
6089  *    error - requested scaling cannot be supported or other error condition
6090  */
6091 int skl_update_scaler_crtc(struct intel_crtc_state *state)
6092 {
6093         const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
6094         bool need_scaler = false;
6095
6096         if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
6097             state->pch_pfit.enabled)
6098                 need_scaler = true;
6099
6100         return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
6101                                  &state->scaler_state.scaler_id,
6102                                  state->pipe_src_w, state->pipe_src_h,
6103                                  adjusted_mode->crtc_hdisplay,
6104                                  adjusted_mode->crtc_vdisplay, NULL, 0,
6105                                  need_scaler);
6106 }
6107
6108 /**
6109  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
6110  * @crtc_state: crtc's scaler state
6111  * @plane_state: atomic plane state to update
6112  *
6113  * Return
6114  *     0 - scaler_usage updated successfully
6115  *    error - requested scaling cannot be supported or other error condition
6116  */
6117 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
6118                                    struct intel_plane_state *plane_state)
6119 {
6120         struct intel_plane *intel_plane =
6121                 to_intel_plane(plane_state->uapi.plane);
6122         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
6123         struct drm_framebuffer *fb = plane_state->hw.fb;
6124         int ret;
6125         bool force_detach = !fb || !plane_state->uapi.visible;
6126         bool need_scaler = false;
6127
6128         /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
6129         if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
6130             fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
6131                 need_scaler = true;
6132
6133         ret = skl_update_scaler(crtc_state, force_detach,
6134                                 drm_plane_index(&intel_plane->base),
6135                                 &plane_state->scaler_id,
6136                                 drm_rect_width(&plane_state->uapi.src) >> 16,
6137                                 drm_rect_height(&plane_state->uapi.src) >> 16,
6138                                 drm_rect_width(&plane_state->uapi.dst),
6139                                 drm_rect_height(&plane_state->uapi.dst),
6140                                 fb ? fb->format : NULL,
6141                                 fb ? fb->modifier : 0,
6142                                 need_scaler);
6143
6144         if (ret || plane_state->scaler_id < 0)
6145                 return ret;
6146
6147         /* check colorkey */
6148         if (plane_state->ckey.flags) {
6149                 drm_dbg_kms(&dev_priv->drm,
6150                             "[PLANE:%d:%s] scaling with color key not allowed",
6151                             intel_plane->base.base.id,
6152                             intel_plane->base.name);
6153                 return -EINVAL;
6154         }
6155
6156         /* Check src format */
6157         switch (fb->format->format) {
6158         case DRM_FORMAT_RGB565:
6159         case DRM_FORMAT_XBGR8888:
6160         case DRM_FORMAT_XRGB8888:
6161         case DRM_FORMAT_ABGR8888:
6162         case DRM_FORMAT_ARGB8888:
6163         case DRM_FORMAT_XRGB2101010:
6164         case DRM_FORMAT_XBGR2101010:
6165         case DRM_FORMAT_ARGB2101010:
6166         case DRM_FORMAT_ABGR2101010:
6167         case DRM_FORMAT_YUYV:
6168         case DRM_FORMAT_YVYU:
6169         case DRM_FORMAT_UYVY:
6170         case DRM_FORMAT_VYUY:
6171         case DRM_FORMAT_NV12:
6172         case DRM_FORMAT_P010:
6173         case DRM_FORMAT_P012:
6174         case DRM_FORMAT_P016:
6175         case DRM_FORMAT_Y210:
6176         case DRM_FORMAT_Y212:
6177         case DRM_FORMAT_Y216:
6178         case DRM_FORMAT_XVYU2101010:
6179         case DRM_FORMAT_XVYU12_16161616:
6180         case DRM_FORMAT_XVYU16161616:
6181                 break;
6182         case DRM_FORMAT_XBGR16161616F:
6183         case DRM_FORMAT_ABGR16161616F:
6184         case DRM_FORMAT_XRGB16161616F:
6185         case DRM_FORMAT_ARGB16161616F:
6186                 if (INTEL_GEN(dev_priv) >= 11)
6187                         break;
6188                 /* fall through */
6189         default:
6190                 drm_dbg_kms(&dev_priv->drm,
6191                             "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
6192                             intel_plane->base.base.id, intel_plane->base.name,
6193                             fb->base.id, fb->format->format);
6194                 return -EINVAL;
6195         }
6196
6197         return 0;
6198 }
6199
6200 void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
6201 {
6202         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6203         int i;
6204
6205         for (i = 0; i < crtc->num_scalers; i++)
6206                 skl_detach_scaler(crtc, i);
6207 }
6208
/*
 * Program the pipe scaler to implement pch_pfit (panel fitting) on SKL+,
 * using the scaler that was assigned to the crtc during atomic check.
 */
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	if (crtc_state->pch_pfit.enabled) {
		u16 uv_rgb_hphase, uv_rgb_vphase;
		int pfit_w, pfit_h, hscale, vscale;
		unsigned long irqflags;
		int id;

		/* pch_pfit.enabled implies a scaler must have been assigned */
		if (drm_WARN_ON(&dev_priv->drm,
				crtc_state->scaler_state.scaler_id < 0))
			return;

		/* pch_pfit.size packs width in the high 16 bits, height in the low */
		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
		pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

		/* 16.16 fixed point scale factors from pipe src to pfit window */
		hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
		vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

		id = scaler_state->scaler_id;

		/* _fw register accesses require us to hold the uncore lock */
		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
				  PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
				  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
		intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
				  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
		intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
				  crtc_state->pch_pfit.pos);
		intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
				  crtc_state->pch_pfit.size);

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	}
}
6254
/* Program the ILK-style panel fitter (PF) for pch_pfit on pre-SKL PCH platforms. */
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (crtc_state->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
			/* IVB/HSW additionally need the pipe selected in PF_CTL */
			intel_de_write(dev_priv, PF_CTL(pipe),
				       PF_ENABLE | PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
		else
			intel_de_write(dev_priv, PF_CTL(pipe),
				       PF_ENABLE | PF_FILTER_MED_3x3);
		intel_de_write(dev_priv, PF_WIN_POS(pipe),
			       crtc_state->pch_pfit.pos);
		intel_de_write(dev_priv, PF_WIN_SZ(pipe),
			       crtc_state->pch_pfit.size);
	}
}
6278
/*
 * Enable IPS (Intermediate Pixel Storage) on HSW/BDW if the crtc state
 * asks for it. Must be called after the planes are enabled and a vblank
 * has passed (see comment below).
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		/* BDW controls IPS through the pcode mailbox, not IPS_CTL */
		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
							 IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS enable\n");
	}
}
6315
/* Disable IPS on HSW/BDW if it was enabled in the given crtc state. */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		/* BDW controls IPS through the pcode mailbox, not IPS_CTL */
		drm_WARN_ON(dev,
			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		intel_de_write(dev_priv, IPS_CTL, 0);
		intel_de_posting_read(dev_priv, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
6344
6345 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
6346 {
6347         if (intel_crtc->overlay)
6348                 (void) intel_overlay_switch_off(intel_crtc->overlay);
6349
6350         /* Let userspace switch the overlay on again. In most cases userspace
6351          * has to recompute where to put it anyway.
6352          */
6353 }
6354
6355 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
6356                                        const struct intel_crtc_state *new_crtc_state)
6357 {
6358         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6359         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6360
6361         if (!old_crtc_state->ips_enabled)
6362                 return false;
6363
6364         if (needs_modeset(new_crtc_state))
6365                 return true;
6366
6367         /*
6368          * Workaround : Do not read or write the pipe palette/gamma data while
6369          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6370          *
6371          * Disable IPS before we program the LUT.
6372          */
6373         if (IS_HASWELL(dev_priv) &&
6374             (new_crtc_state->uapi.color_mgmt_changed ||
6375              new_crtc_state->update_pipe) &&
6376             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6377                 return true;
6378
6379         return !new_crtc_state->ips_enabled;
6380 }
6381
/* Decide whether IPS must be (re-)enabled after the plane update. */
static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
				       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Nothing to enable if the new state doesn't want IPS. */
	if (!new_crtc_state->ips_enabled)
		return false;

	/* After a full modeset IPS always needs to be enabled from scratch. */
	if (needs_modeset(new_crtc_state))
		return true;

	/*
	 * Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 *
	 * Re-enable IPS after the LUT has been programmed.
	 */
	if (IS_HASWELL(dev_priv) &&
	    (new_crtc_state->uapi.color_mgmt_changed ||
	     new_crtc_state->update_pipe) &&
	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
		return true;

	/*
	 * We can't read out IPS on broadwell, assume the worst and
	 * forcibly enable IPS on the first fastset.
	 */
	if (new_crtc_state->update_pipe &&
	    old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
		return true;

	return !old_crtc_state->ips_enabled;
}
6416
6417 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
6418 {
6419         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6420
6421         if (!crtc_state->nv12_planes)
6422                 return false;
6423
6424         /* WA Display #0827: Gen9:all */
6425         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
6426                 return true;
6427
6428         return false;
6429 }
6430
6431 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
6432 {
6433         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6434
6435         /* Wa_2006604312:icl */
6436         if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
6437                 return true;
6438
6439         return false;
6440 }
6441
6442 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
6443                             const struct intel_crtc_state *new_crtc_state)
6444 {
6445         return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
6446                 new_crtc_state->active_planes;
6447 }
6448
6449 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
6450                              const struct intel_crtc_state *new_crtc_state)
6451 {
6452         return old_crtc_state->active_planes &&
6453                 (!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
6454 }
6455
/*
 * Per-crtc work that must run after the plane update has taken effect,
 * i.e. after the vblank following the plane programming: frontbuffer
 * flip notification, watermark/IPS/FBC re-enabling and turning off
 * workarounds that are no longer needed.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	/* IPS may only be (re-)enabled after the planes are on, post-vblank */
	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);

	/* Display WA #0827: turn off once no NV12 plane needs it anymore */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312:icl: turn off once no scaler is in use anymore */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}
6484
/*
 * Per-crtc work that must run before the planes are (re)programmed:
 * disable IPS/FBC where required, enable workarounds that the new state
 * needs, disable self-refresh/LP watermarks where they would interfere
 * with the plane update, and program intermediate watermarks.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	/* FBC pre-update may require a vblank wait before touching the planes */
	if (intel_fbc_pre_update(state, crtc))
		intel_wait_for_vblank(dev_priv, pipe);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (dev_priv->display.initial_watermarks)
			dev_priv->display.initial_watermarks(state, crtc);
		else if (new_crtc_state->update_wm_pre)
			intel_update_watermarks(crtc);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
6571
/*
 * Disable all planes of @crtc that the new crtc state marks for update,
 * then notify the frontbuffer tracking code about the planes that were
 * actually visible before.
 */
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		/* skip planes on other pipes and planes not flagged for update */
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_disable_plane(plane, new_crtc_state);

		/* only previously visible planes need a frontbuffer flip */
		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}
6599
6600 /*
6601  * intel_connector_primary_encoder - get the primary encoder for a connector
6602  * @connector: connector for which to return the encoder
6603  *
6604  * Returns the primary encoder for a connector. There is a 1:1 mapping from
6605  * all connectors to their encoder, except for DP-MST connectors which have
6606  * both a virtual and a primary encoder. These DP-MST primary encoders can be
6607  * pointed to by as many DP-MST connectors as there are pipes.
6608  */
6609 static struct intel_encoder *
6610 intel_connector_primary_encoder(struct intel_connector *connector)
6611 {
6612         struct intel_encoder *encoder;
6613
6614         if (connector->mst_port)
6615                 return &dp_to_dig_port(connector->mst_port)->base;
6616
6617         encoder = intel_attached_encoder(connector);
6618         WARN_ON(!encoder);
6619
6620         return encoder;
6621 }
6622
6623 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6624 {
6625         struct drm_connector_state *new_conn_state;
6626         struct drm_connector *connector;
6627         int i;
6628
6629         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6630                                         i) {
6631                 struct intel_connector *intel_connector;
6632                 struct intel_encoder *encoder;
6633                 struct intel_crtc *crtc;
6634
6635                 if (!intel_connector_needs_modeset(state, connector))
6636                         continue;
6637
6638                 intel_connector = to_intel_connector(connector);
6639                 encoder = intel_connector_primary_encoder(intel_connector);
6640                 if (!encoder->update_prepare)
6641                         continue;
6642
6643                 crtc = new_conn_state->crtc ?
6644                         to_intel_crtc(new_conn_state->crtc) : NULL;
6645                 encoder->update_prepare(state, encoder, crtc);
6646         }
6647 }
6648
6649 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6650 {
6651         struct drm_connector_state *new_conn_state;
6652         struct drm_connector *connector;
6653         int i;
6654
6655         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6656                                         i) {
6657                 struct intel_connector *intel_connector;
6658                 struct intel_encoder *encoder;
6659                 struct intel_crtc *crtc;
6660
6661                 if (!intel_connector_needs_modeset(state, connector))
6662                         continue;
6663
6664                 intel_connector = to_intel_connector(connector);
6665                 encoder = intel_connector_primary_encoder(intel_connector);
6666                 if (!encoder->update_complete)
6667                         continue;
6668
6669                 crtc = new_conn_state->crtc ?
6670                         to_intel_crtc(new_conn_state->crtc) : NULL;
6671                 encoder->update_complete(state, encoder, crtc);
6672         }
6673 }
6674
/* Call ->pre_pll_enable() on all encoders feeding @crtc, before its PLL is on. */
static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		/* skip connectors driven by other crtcs */
		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(state, encoder,
						crtc_state, conn_state);
	}
}
6696
/* Call ->pre_enable() on all encoders feeding @crtc, before the pipe is on. */
static void intel_encoders_pre_enable(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		/* skip connectors driven by other crtcs */
		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(state, encoder,
					    crtc_state, conn_state);
	}
}
6718
/* Call ->enable() on all encoders feeding @crtc and notify OpRegion. */
static void intel_encoders_enable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		/* skip connectors driven by other crtcs */
		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->enable)
			encoder->enable(state, encoder,
					crtc_state, conn_state);
		/* tell the BIOS/OpRegion the encoder is now active */
		intel_opregion_notify_encoder(encoder, true);
	}
}
6741
/* Notify OpRegion and call ->disable() on all encoders that fed @crtc. */
static void intel_encoders_disable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		/* skip connectors that were driven by other crtcs */
		if (old_conn_state->crtc != &crtc->base)
			continue;

		/* tell the BIOS/OpRegion the encoder is going inactive */
		intel_opregion_notify_encoder(encoder, false);
		if (encoder->disable)
			encoder->disable(state, encoder,
					 old_crtc_state, old_conn_state);
	}
}
6764
/* Call ->post_disable() on all encoders that fed @crtc, after the pipe is off. */
static void intel_encoders_post_disable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		/* skip connectors that were driven by other crtcs */
		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(state, encoder,
					      old_crtc_state, old_conn_state);
	}
}
6786
/* Call ->post_pll_disable() on all encoders that fed @crtc, after its PLL is off. */
static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
					    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		/* skip connectors that were driven by other crtcs */
		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(state, encoder,
						  old_crtc_state, old_conn_state);
	}
}
6808
/* Call ->update_pipe() on all encoders feeding @crtc (fastset path). */
static void intel_encoders_update_pipe(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		/* skip connectors driven by other crtcs */
		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->update_pipe)
			encoder->update_pipe(state, encoder,
					     crtc_state, conn_state);
	}
}
6830
6831 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6832 {
6833         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6834         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6835
6836         plane->disable_plane(plane, crtc_state);
6837 }
6838
/*
 * Modeset enable sequence for ILK-IVB (PCH split) pipes: program the
 * pipe/transcoder state, run the encoder pre_enable hooks, enable the
 * FDI PLL / pipe / PCH transcoder as needed, then run the encoder
 * enable hooks. The ordering of the steps below is deliberate — see
 * the inline comments before reordering anything.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active crtc indicates a state tracking bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (new_crtc_state->has_pch_encoder)
		intel_prepare_shared_dpll(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	/* Program initial watermarks before enabling the pipe. */
	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	/* Re-arm underrun reporting now that enabling is complete. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6930
6931 /* IPS only exists on ULT machines and is tied to pipe A. */
6932 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6933 {
6934         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6935 }
6936
6937 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6938                                             enum pipe pipe, bool apply)
6939 {
6940         u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
6941         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6942
6943         if (apply)
6944                 val |= mask;
6945         else
6946                 val &= ~mask;
6947
6948         intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
6949 }
6950
6951 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6952 {
6953         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6954         enum pipe pipe = crtc->pipe;
6955         u32 val;
6956
6957         val = MBUS_DBOX_A_CREDIT(2);
6958
6959         if (INTEL_GEN(dev_priv) >= 12) {
6960                 val |= MBUS_DBOX_BW_CREDIT(2);
6961                 val |= MBUS_DBOX_B_CREDIT(12);
6962         } else {
6963                 val |= MBUS_DBOX_BW_CREDIT(1);
6964                 val |= MBUS_DBOX_B_CREDIT(8);
6965         }
6966
6967         intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
6968 }
6969
6970 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
6971 {
6972         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6973         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6974
6975         intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
6976                        HSW_LINETIME(crtc_state->linetime) |
6977                        HSW_IPS_LINETIME(crtc_state->ips_linetime));
6978 }
6979
6980 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
6981 {
6982         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6983         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6984         i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
6985         u32 val;
6986
6987         val = intel_de_read(dev_priv, reg);
6988         val &= ~HSW_FRAME_START_DELAY_MASK;
6989         val |= HSW_FRAME_START_DELAY(0);
6990         intel_de_write(dev_priv, reg, val);
6991 }
6992
/*
 * Modeset enable sequence for HSW+ (DDI) pipes: encoder pre_pll/pre
 * hooks around shared DPLL enable, transcoder timings and pipeconf,
 * panel fitter, LUTs, linetime watermarks, transcoder function enable,
 * and finally the encoder enable hooks. The relative ordering of the
 * steps matters; see the workaround comments below before reordering.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	/* Enabling an already-active crtc indicates a state tracking bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	intel_encoders_pre_pll_enable(state, crtc);

	if (new_crtc_state->shared_dpll)
		intel_enable_shared_dpll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	/* DSI transcoders program their own timings via the encoder hooks. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder))
		intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
			       new_crtc_state->pixel_multiplier - 1);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	if (!transcoder_is_dsi(cpu_transcoder)) {
		hsw_set_frame_start_delay(new_crtc_state);
		hsw_set_pipeconf(new_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	/* gen9+ uses the pipe scaler for pfit, older platforms the PF unit */
	if (INTEL_GEN(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (INTEL_GEN(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(crtc);

	intel_encoders_enable(state, crtc);

	/* WA #1180 cleanup: wait one vblank before re-enabling clock gating. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
7087
7088 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
7089 {
7090         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
7091         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7092         enum pipe pipe = crtc->pipe;
7093
7094         /* To avoid upsetting the power well on haswell only disable the pfit if
7095          * it's in use. The hw state code will make sure we get this right. */
7096         if (old_crtc_state->pch_pfit.enabled) {
7097                 intel_de_write(dev_priv, PF_CTL(pipe), 0);
7098                 intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
7099                 intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
7100         }
7101 }
7102
/*
 * Modeset disable sequence for ILK-IVB pipes: encoder disable hooks,
 * pipe off, pfit off, FDI off, encoder post_disable hooks, and finally
 * the PCH transcoder / DPLL_SEL / FDI PLL teardown when a PCH encoder
 * was in use.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		/* CPT routes PCH ports through TRANS_DP_CTL/DPLL_SEL. */
		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = intel_de_read(dev_priv, reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			intel_de_write(dev_priv, reg, temp);

			/* disable DPLL_SEL */
			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	/* Re-arm underrun reporting now that the teardown is complete. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
7159
/*
 * Modeset disable sequence for HSW+ (DDI) pipes. The encoder
 * disable/post_disable hooks are expected to perform the actual
 * pipe/transcoder teardown on these platforms.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}
7170
/*
 * Enable the GMCH panel fitter for gen2-4/VLV/CHV. No-op when the
 * computed pfit state has scaling disabled.
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* Program the ratios before flipping the enable bit in PFIT_CONTROL. */
	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
7195
7196 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
7197 {
7198         if (phy == PHY_NONE)
7199                 return false;
7200
7201         if (IS_ELKHARTLAKE(dev_priv))
7202                 return phy <= PHY_C;
7203
7204         if (INTEL_GEN(dev_priv) >= 11)
7205                 return phy <= PHY_B;
7206
7207         return false;
7208 }
7209
7210 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
7211 {
7212         if (INTEL_GEN(dev_priv) >= 12)
7213                 return phy >= PHY_D && phy <= PHY_I;
7214
7215         if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
7216                 return phy >= PHY_C && phy <= PHY_F;
7217
7218         return false;
7219 }
7220
7221 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
7222 {
7223         if (IS_ELKHARTLAKE(i915) && port == PORT_D)
7224                 return PHY_A;
7225
7226         return (enum phy)port;
7227 }
7228
7229 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
7230 {
7231         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
7232                 return PORT_TC_NONE;
7233
7234         if (INTEL_GEN(dev_priv) >= 12)
7235                 return port - PORT_D;
7236
7237         return port - PORT_C;
7238 }
7239
/*
 * Map a DDI port to the power domain covering its lanes. Unknown ports
 * are reported via MISSING_CASE and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
7262
/*
 * Return the display power domain needed to use @dig_port's AUX channel.
 * Type-C ports operating in TBT-ALT mode use the dedicated *_TBT
 * domains; all other ports map straight to the per-AUX-channel domain.
 * Unknown AUX channels are reported via MISSING_CASE with a fallback.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	switch (dig_port->aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	default:
		MISSING_CASE(dig_port->aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}
7308
7309 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7310 {
7311         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7312         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7313         struct drm_encoder *encoder;
7314         enum pipe pipe = crtc->pipe;
7315         u64 mask;
7316         enum transcoder transcoder = crtc_state->cpu_transcoder;
7317
7318         if (!crtc_state->hw.active)
7319                 return 0;
7320
7321         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
7322         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
7323         if (crtc_state->pch_pfit.enabled ||
7324             crtc_state->pch_pfit.force_thru)
7325                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
7326
7327         drm_for_each_encoder_mask(encoder, &dev_priv->drm,
7328                                   crtc_state->uapi.encoder_mask) {
7329                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
7330
7331                 mask |= BIT_ULL(intel_encoder->power_domain);
7332         }
7333
7334         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
7335                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
7336
7337         if (crtc_state->shared_dpll)
7338                 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
7339
7340         return mask;
7341 }
7342
7343 static u64
7344 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7345 {
7346         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7347         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7348         enum intel_display_power_domain domain;
7349         u64 domains, new_domains, old_domains;
7350
7351         old_domains = crtc->enabled_power_domains;
7352         crtc->enabled_power_domains = new_domains =
7353                 get_crtc_power_domains(crtc_state);
7354
7355         domains = new_domains & ~old_domains;
7356
7357         for_each_power_domain(domain, domains)
7358                 intel_display_power_get(dev_priv, domain);
7359
7360         return old_domains & ~new_domains;
7361 }
7362
7363 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
7364                                       u64 domains)
7365 {
7366         enum intel_display_power_domain domain;
7367
7368         for_each_power_domain(domain, domains)
7369                 intel_display_power_put_unchecked(dev_priv, domain);
7370 }
7371
/*
 * Modeset enable sequence for VLV/CHV pipes: program pipe state, enable
 * the DPLL between the encoder pre_pll_enable and pre_enable hooks,
 * then pfit, LUTs, watermarks, pipe, and finally the encoder enable
 * hooks.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active crtc indicates a state tracking bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(crtc, new_crtc_state);
		chv_enable_pll(crtc, new_crtc_state);
	} else {
		vlv_prepare_pll(crtc, new_crtc_state);
		vlv_enable_pll(crtc, new_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	/*
	 * NOTE(review): called unconditionally here, unlike the ilk/i9xx
	 * paths which NULL-check it — presumably initial_watermarks is
	 * always set on VLV/CHV; confirm before relying on this elsewhere.
	 */
	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
7426
7427 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
7428 {
7429         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7430         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7431
7432         intel_de_write(dev_priv, FP0(crtc->pipe),
7433                        crtc_state->dpll_hw_state.fp0);
7434         intel_de_write(dev_priv, FP1(crtc->pipe),
7435                        crtc_state->dpll_hw_state.fp1);
7436 }
7437
/*
 * Modeset enable sequence for gen2-4 pipes: PLL dividers, pipe state,
 * PLL, pfit, LUTs, watermarks, pipe, and finally the encoder enable
 * hooks.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active crtc indicates a state tracking bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* underrun reporting is skipped on gen2 */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	/* Program initial watermarks before enabling the pipe. */
	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
7485
/*
 * Disable the GMCH panel fitter if the old state had it enabled. The
 * pipe must already be off — the pfit may only be touched while the
 * pipe is disabled (see i9xx_pfit_enable()).
 */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(dev_priv, PFIT_CONTROL));
	intel_de_write(dev_priv, PFIT_CONTROL, 0);
}
7500
/*
 * Modeset disable sequence for gen2-4 / VLV / CHV pipes: encoder
 * disable hooks, pipe off, pfit off, encoder post_disable hooks, DPLL
 * off (non-DSI), then encoder post_pll_disable hooks and watermark /
 * power-saving cleanup.
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI keeps its PLL under encoder control; skip the generic teardown. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
7547
/*
 * Force a crtc off outside of a normal atomic commit (e.g. during
 * initial hw state sanitization). Disables the planes and runs the
 * platform crtc_disable hook via a temporary atomic state, then scrubs
 * the software state to match: crtc/connector/encoder links, shared
 * DPLL, power domain references, and the cdclk/bandwidth bookkeeping.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	u64 domains;
	int ret;

	/* Nothing to do for a crtc that is already off. */
	if (!crtc_state->hw.active)
		return;

	/* Turn off all visible planes before disabling the pipe itself. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/*
	 * The platform crtc_disable hook takes an atomic state, so build
	 * a throwaway one just for this call.
	 */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	/* From here on, scrub the software state to match the hw. */
	crtc->active = false;
	crtc->base.enabled = false;

	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(crtc_state);

	/* Release every power domain reference this crtc was holding. */
	domains = crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	crtc->enabled_power_domains = 0;

	dev_priv->active_pipes &= ~BIT(pipe);
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
7633
7634 /*
7635  * turn all crtc's off, but do not adjust state
7636  * This has to be paired with a call to intel_modeset_setup_hw_state.
7637  */
7638 int intel_display_suspend(struct drm_device *dev)
7639 {
7640         struct drm_i915_private *dev_priv = to_i915(dev);
7641         struct drm_atomic_state *state;
7642         int ret;
7643
7644         state = drm_atomic_helper_suspend(dev);
7645         ret = PTR_ERR_OR_ZERO(state);
7646         if (ret)
7647                 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
7648                         ret);
7649         else
7650                 dev_priv->modeset_restore_state = state;
7651         return ret;
7652 }
7653
/* Tear down the DRM core encoder bookkeeping, then free our wrapper. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *ienc = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(ienc);
}
7661
/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency). Emits I915_STATE_WARNs on mismatch; no return value. */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		/* A connector enabled in hardware must be tied to a crtc... */
		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		/* ...and that crtc must itself be active. */
		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* DP MST connectors are excluded from the encoder checks below. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* A hw-disabled connector must not be linked to active state. */
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
7700
7701 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7702 {
7703         if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
7704                 return crtc_state->fdi_lanes;
7705
7706         return 0;
7707 }
7708
/*
 * Validate the FDI lane count requested for @pipe against the per-platform
 * limits and against the lanes claimed by pipes that share the FDI link.
 * Returns 0 on success or a negative errno (may return -EDEADLK from
 * intel_atomic_get_crtc_state(), which the caller must propagate).
 */
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute maximum accepted here. */
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW are limited to 2 FDI lanes. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no lane sharing to worry about. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		/* Pipe A is always fine at this point. */
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* B using >2 lanes is only valid if pipe C needs none. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C itself can never use more than 2 lanes... */
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* ...and only when pipe B isn't already using more than 2. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
7786
#define RETRY 1
/*
 * Compute the FDI lane count and M/N values for the adjusted mode,
 * reducing pipe bpp and retrying when the link lacks bandwidth.
 * Returns 0 on success, RETRY when the caller must recompute the
 * crtc state with the reduced bpp, or a negative errno.
 */
static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	/* Propagate deadlocks so the atomic machinery can back off. */
	if (ret == -EDEADLK)
		return ret;

	/* Over budget: drop 2 bits per component (down to 18bpp) and retry. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
7837
7838 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7839 {
7840         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7841         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7842
7843         /* IPS only exists on ULT machines and is tied to pipe A. */
7844         if (!hsw_crtc_supports_ips(crtc))
7845                 return false;
7846
7847         if (!i915_modparams.enable_ips)
7848                 return false;
7849
7850         if (crtc_state->pipe_bpp > 24)
7851                 return false;
7852
7853         /*
7854          * We compare against max which means we must take
7855          * the increased cdclk requirement into account when
7856          * calculating the new cdclk.
7857          *
7858          * Should measure whether using a lower cdclk w/o IPS
7859          */
7860         if (IS_BROADWELL(dev_priv) &&
7861             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7862                 return false;
7863
7864         return true;
7865 }
7866
/*
 * Decide whether IPS should actually be enabled for this crtc state and
 * record the verdict in crtc_state->ips_enabled. Returns 0, or a negative
 * errno from the cdclk state lookup.
 */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	/* Default to off; only flip on once all checks below pass. */
	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}
7908
7909 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7910 {
7911         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7912
7913         /* GDG double wide on either pipe, otherwise pipe A only */
7914         return INTEL_GEN(dev_priv) < 4 &&
7915                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7916 }
7917
7918 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
7919 {
7920         u32 pixel_rate;
7921
7922         pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
7923
7924         /*
7925          * We only use IF-ID interlacing. If we ever use
7926          * PF-ID we'll need to adjust the pixel_rate here.
7927          */
7928
7929         if (pipe_config->pch_pfit.enabled) {
7930                 u64 pipe_w, pipe_h, pfit_w, pfit_h;
7931                 u32 pfit_size = pipe_config->pch_pfit.size;
7932
7933                 pipe_w = pipe_config->pipe_src_w;
7934                 pipe_h = pipe_config->pipe_src_h;
7935
7936                 pfit_w = (pfit_size >> 16) & 0xFFFF;
7937                 pfit_h = pfit_size & 0xFFFF;
7938                 if (pipe_w < pfit_w)
7939                         pipe_w = pfit_w;
7940                 if (pipe_h < pfit_h)
7941                         pipe_h = pfit_h;
7942
7943                 if (WARN_ON(!pfit_w || !pfit_h))
7944                         return pixel_rate;
7945
7946                 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7947                                      pfit_w * pfit_h);
7948         }
7949
7950         return pixel_rate;
7951 }
7952
7953 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7954 {
7955         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
7956
7957         if (HAS_GMCH(dev_priv))
7958                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7959                 crtc_state->pixel_rate =
7960                         crtc_state->hw.adjusted_mode.crtc_clock;
7961         else
7962                 crtc_state->pixel_rate =
7963                         ilk_pipe_pixel_rate(crtc_state);
7964 }
7965
/*
 * Platform-independent crtc config checks and fixups: dotclock limits
 * (including double wide selection on gen3-), YCbCr-vs-CTM exclusivity,
 * even-width constraints and the hsync workaround. Also computes the
 * pipe pixel rate and, for PCH encoders, the FDI configuration.
 * Returns 0, RETRY (via ilk_fdi_compute_config()) or a negative errno.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* Single wide pipe is limited to 90% of cdclk on gen3-. */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    adjusted_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->hw.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
8043
8044 static void
8045 intel_reduce_m_n_ratio(u32 *num, u32 *den)
8046 {
8047         while (*num > DATA_LINK_M_N_MASK ||
8048                *den > DATA_LINK_M_N_MASK) {
8049                 *num >>= 1;
8050                 *den >>= 1;
8051         }
8052 }
8053
8054 static void compute_m_n(unsigned int m, unsigned int n,
8055                         u32 *ret_m, u32 *ret_n,
8056                         bool constant_n)
8057 {
8058         /*
8059          * Several DP dongles in particular seem to be fussy about
8060          * too large link M/N values. Give N value as 0x8000 that
8061          * should be acceptable by specific devices. 0x8000 is the
8062          * specified fixed N value for asynchronous clock mode,
8063          * which the devices expect also in synchronous clock mode.
8064          */
8065         if (constant_n)
8066                 *ret_n = 0x8000;
8067         else
8068                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
8069
8070         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
8071         intel_reduce_m_n_ratio(ret_m, ret_n);
8072 }
8073
8074 void
8075 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
8076                        int pixel_clock, int link_clock,
8077                        struct intel_link_m_n *m_n,
8078                        bool constant_n, bool fec_enable)
8079 {
8080         u32 data_clock = bits_per_pixel * pixel_clock;
8081
8082         if (fec_enable)
8083                 data_clock = intel_dp_mode_to_fec_clock(data_clock);
8084
8085         m_n->tu = 64;
8086         compute_m_n(data_clock,
8087                     link_clock * nlanes * 8,
8088                     &m_n->gmch_m, &m_n->gmch_n,
8089                     constant_n);
8090
8091         compute_m_n(pixel_clock, link_clock,
8092                     &m_n->link_m, &m_n->link_n,
8093                     constant_n);
8094 }
8095
/*
 * Reconcile the VBT's LVDS SSC setting with what the BIOS actually
 * programmed in PCH_DREF_CONTROL; the hardware state wins.
 */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    enableddisabled(bios_lvds_use_ssc),
				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}
8118
8119 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
8120 {
8121         if (i915_modparams.panel_use_ssc >= 0)
8122                 return i915_modparams.panel_use_ssc != 0;
8123         return dev_priv->vbt.lvds_use_ssc
8124                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
8125 }
8126
8127 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
8128 {
8129         return (1 << dpll->n) << 16 | dpll->m2;
8130 }
8131
8132 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
8133 {
8134         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
8135 }
8136
8137 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
8138                                      struct intel_crtc_state *crtc_state,
8139                                      struct dpll *reduced_clock)
8140 {
8141         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8142         u32 fp, fp2 = 0;
8143
8144         if (IS_PINEVIEW(dev_priv)) {
8145                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
8146                 if (reduced_clock)
8147                         fp2 = pnv_dpll_compute_fp(reduced_clock);
8148         } else {
8149                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8150                 if (reduced_clock)
8151                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
8152         }
8153
8154         crtc_state->dpll_hw_state.fp0 = fp;
8155
8156         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8157             reduced_clock) {
8158                 crtc_state->dpll_hw_state.fp1 = fp2;
8159         } else {
8160                 crtc_state->dpll_hw_state.fp1 = fp;
8161         }
8162 }
8163
/*
 * Recalibrate the PLL B opamp through DPIO on VLV.
 * NOTE(review): the constants and the read-modify-write sequence are
 * magic from hardware documentation; the ordering is assumed to be
 * significant — do not reorder.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the forced value again... */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	/* ...and restore the reference with a different high field. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
8192
/* Program the PCH transcoder data/link M1/N1 registers for this pipe. */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
		       TU_SIZE(m_n->tu) | m_n->gmch_m);
	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
8206
8207 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
8208                                  enum transcoder transcoder)
8209 {
8210         if (IS_HASWELL(dev_priv))
8211                 return transcoder == TRANSCODER_EDP;
8212
8213         /*
8214          * Strictly speaking some registers are available before
8215          * gen7, but we only support DRRS on gen7+
8216          */
8217         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
8218 }
8219
/*
 * Program the CPU transcoder (gen5+) or pipe (gen4 and earlier) data/link
 * M/N registers. @m2_n2 is optional; it is written only when DRRS is in
 * use and the transcoder actually has M2/N2 registers.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
			       m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
			       m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
			       m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
				       m2_n2->gmch_n);
			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
				       m2_n2->link_m);
			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
				       m2_n2->link_n);
		}
	} else {
		/* Pre-gen5: per-pipe, not per-transcoder, registers. */
		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
8261
/*
 * Program the DP M/N values selected by @m_n: M1_N1 programs the primary
 * dividers (plus m2_n2 where the transcoder supports it), while M2_N2
 * programs the m2_n2 dividers into the M1_N1 registers on platforms
 * without dedicated M2_N2 registers.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		drm_err(&i915->drm, "Unsupported divider value\n");
		return;
	}

	/*
	 * NOTE(review): the PCH path always writes dp_m_n, ignoring the
	 * dp_m_n selection made above for the M2_N2 case — verify this
	 * asymmetry is intentional for PCH transcoders.
	 */
	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
8287
/*
 * Compute the VLV DPLL and DPLL_MD register values for this crtc state.
 * The actual divider programming happens later (vlv_prepare_pll()).
 */
static void vlv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* All pipes except A additionally enable the integrated CRI clock. */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
			DPLL_EXT_BUFFER_ENABLE_VLV;

	/* Pixel multiplier is stored off-by-one in the MD register. */
	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
8304
/*
 * Compute the CHV DPLL and DPLL_MD register values for this crtc state.
 * Mirrors vlv_compute_dpll() but uses the CHV SSC reference clock and
 * has no external buffer enable bit.
 */
static void chv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* All pipes except A additionally enable the integrated CRI clock. */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;

	/* Pixel multiplier is stored off-by-one in the MD register. */
	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
8320
/*
 * Program the VLV DPLL through DPIO sideband writes: feedback/post
 * dividers, loop filter coefficients and SSC/bend clock source
 * selection. Called before the PLL itself is enabled (the DPLL
 * register is written here with VCO/ext buffer enable masked off).
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	intel_de_write(dev_priv, DPLL(pipe),
		       pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);

	/* Divider values computed earlier by the clock-compute hooks */
	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Write the dividers first, then enable calibration on top */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock tuning; extra bit set for DP outputs */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	vlv_dpio_put(dev_priv);
}
8419
/*
 * Program the CHV DPLL through DPIO sideband writes: p/m/n dividers,
 * fractional M2, digital lock detect threshold, loop filter and AFC
 * recalibration. Called before the PLL itself is enabled (the DPLL
 * register is written here with VCO enable masked off).
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	intel_de_write(dev_priv, DPLL(pipe),
		       pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* m2 is split: integer part in bits 22+, 22-bit fraction below */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable (only when a fraction is present) */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter coefficients keyed off the VCO frequency */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	/* TDC target count from the tribuf value selected above */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}
8524
8525 /**
8526  * vlv_force_pll_on - forcibly enable just the PLL
8527  * @dev_priv: i915 private structure
8528  * @pipe: pipe PLL to enable
8529  * @dpll: PLL configuration
8530  *
8531  * Enable the PLL for @pipe using the supplied @dpll config. To be used
8532  * in cases where we need the PLL enabled even when @pipe is not going to
8533  * be enabled.
8534  */
8535 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8536                      const struct dpll *dpll)
8537 {
8538         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8539         struct intel_crtc_state *pipe_config;
8540
8541         pipe_config = intel_crtc_state_alloc(crtc);
8542         if (!pipe_config)
8543                 return -ENOMEM;
8544
8545         pipe_config->cpu_transcoder = (enum transcoder)pipe;
8546         pipe_config->pixel_multiplier = 1;
8547         pipe_config->dpll = *dpll;
8548
8549         if (IS_CHERRYVIEW(dev_priv)) {
8550                 chv_compute_dpll(crtc, pipe_config);
8551                 chv_prepare_pll(crtc, pipe_config);
8552                 chv_enable_pll(crtc, pipe_config);
8553         } else {
8554                 vlv_compute_dpll(crtc, pipe_config);
8555                 vlv_prepare_pll(crtc, pipe_config);
8556                 vlv_enable_pll(crtc, pipe_config);
8557         }
8558
8559         kfree(pipe_config);
8560
8561         return 0;
8562 }
8563
8564 /**
8565  * vlv_force_pll_off - forcibly disable just the PLL
8566  * @dev_priv: i915 private structure
8567  * @pipe: pipe PLL to disable
8568  *
8569  * Disable the PLL for @pipe. To be used in cases where we need
8570  * the PLL enabled even when @pipe is not going to be enabled.
8571  */
8572 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8573 {
8574         if (IS_CHERRYVIEW(dev_priv))
8575                 chv_disable_pll(dev_priv, pipe);
8576         else
8577                 vlv_disable_pll(dev_priv, pipe);
8578 }
8579
/*
 * Compute the DPLL (and, on gen4+, DPLL_MD) register values for
 * gen3/gen4-style PLLs from the divider values already stored in
 * crtc_state->dpll, and stash them in crtc_state->dpll_hw_state.
 *
 * @reduced_clock: optional reduced (downclocked) divider set; only its
 * P1 field is consumed here (G4X only), may be NULL.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* NOTE(review): presumably fills in the FP divider state; defined elsewhere */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* 945/G33/PNV carry the pixel multiplier in the DPLL itself */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	/* SDVO/HDMI and DP all need the high speed clock bit */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* reference clock selection: TV clock, SSC for LVDS, else DREF */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* gen4+ carry the pixel multiplier in DPLL_MD instead */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
8652
/*
 * Compute the DPLL register value for gen2 (i8xx) PLLs from the divider
 * values already stored in crtc_state->dpll, and stash the result in
 * crtc_state->dpll_hw_state.dpll.
 *
 * @reduced_clock: optional reduced dividers, passed through to
 * i9xx_update_pll_dividers() only; may be NULL.
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* NOTE(review): presumably fills in the FP divider state; defined elsewhere */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	/* P1/P2 encoding differs between LVDS and the other outputs */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/*
	 * Bspec:
	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to “1” in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity We simply keep both bits always enabled in
	 * both DPLLS. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	/* SSC reference for LVDS panels when enabled, else DREF clock */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
8702
/*
 * Write the transcoder timing registers (HTOTAL/HBLANK/HSYNC,
 * VTOTAL/VBLANK/VSYNC and, on gen4+, VSYNCSHIFT) from the adjusted
 * mode in @crtc_state. Registers encode each value minus one, with
 * the "total"/"end" half in the high 16 bits.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* SDVO gets a different vsync shift than the other outputs */
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT is only written on gen4+ */
	if (INTEL_GEN(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
			       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
8760
8761 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8762 {
8763         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8764         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8765         enum pipe pipe = crtc->pipe;
8766
8767         /* pipesrc controls the size that is scaled from, which should
8768          * always be the user's requested size.
8769          */
8770         intel_de_write(dev_priv, PIPESRC(pipe),
8771                        ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
8772 }
8773
8774 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8775 {
8776         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8777         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8778
8779         if (IS_GEN(dev_priv, 2))
8780                 return false;
8781
8782         if (INTEL_GEN(dev_priv) >= 9 ||
8783             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8784                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8785         else
8786                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
8787 }
8788
/*
 * Read back the transcoder timing registers into the adjusted mode
 * (the readout counterpart of intel_set_pipe_timings()). Registers
 * store value-minus-one, so +1 is applied to everything read.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	/* blank registers are not read back for DSI transcoders */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/* undo the -1 applied on write for interlaced modes */
	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}
8833
8834 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8835                                     struct intel_crtc_state *pipe_config)
8836 {
8837         struct drm_device *dev = crtc->base.dev;
8838         struct drm_i915_private *dev_priv = to_i915(dev);
8839         u32 tmp;
8840
8841         tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
8842         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8843         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8844
8845         pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
8846         pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
8847 }
8848
8849 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8850                                  struct intel_crtc_state *pipe_config)
8851 {
8852         mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
8853         mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
8854         mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
8855         mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
8856
8857         mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
8858         mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
8859         mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
8860         mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
8861
8862         mode->flags = pipe_config->hw.adjusted_mode.flags;
8863         mode->type = DRM_MODE_TYPE_DRIVER;
8864
8865         mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
8866
8867         mode->hsync = drm_mode_hsync(mode);
8868         mode->vrefresh = drm_mode_vrefresh(mode);
8869         drm_mode_set_name(mode);
8870 }
8871
/*
 * Build and write the PIPECONF value for pre-DDI platforms: double
 * wide mode, bpc/dither (g4x+), interlace mode, limited color range
 * (VLV/CHV) and gamma mode.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	/* interlace mode: field indication pre-gen4 and for SDVO */
	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(0);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
8932
8933 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8934                                    struct intel_crtc_state *crtc_state)
8935 {
8936         struct drm_device *dev = crtc->base.dev;
8937         struct drm_i915_private *dev_priv = to_i915(dev);
8938         const struct intel_limit *limit;
8939         int refclk = 48000;
8940
8941         memset(&crtc_state->dpll_hw_state, 0,
8942                sizeof(crtc_state->dpll_hw_state));
8943
8944         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8945                 if (intel_panel_use_ssc(dev_priv)) {
8946                         refclk = dev_priv->vbt.lvds_ssc_freq;
8947                         drm_dbg_kms(&dev_priv->drm,
8948                                     "using SSC reference clock of %d kHz\n",
8949                                     refclk);
8950                 }
8951
8952                 limit = &intel_limits_i8xx_lvds;
8953         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8954                 limit = &intel_limits_i8xx_dvo;
8955         } else {
8956                 limit = &intel_limits_i8xx_dac;
8957         }
8958
8959         if (!crtc_state->clock_set &&
8960             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8961                                  refclk, NULL, &crtc_state->dpll)) {
8962                 drm_err(&dev_priv->drm,
8963                         "Couldn't find PLL settings for mode!\n");
8964                 return -EINVAL;
8965         }
8966
8967         i8xx_compute_dpll(crtc, crtc_state, NULL);
8968
8969         return 0;
8970 }
8971
/*
 * Pick the G4X PLL limits for the attached output type (LVDS single
 * vs dual link, HDMI/analog, SDVO, or the generic fallback) and find
 * divider values for the requested port clock.
 * Returns 0 on success, -EINVAL if no suitable dividers exist.
 */
static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/* LVDS panels may run off the SSC reference instead */
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    refclk);
		}

		if (intel_is_dual_link_lvds(dev_priv))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else {
		/* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		drm_err(&dev_priv->drm,
			"Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}
9016
9017 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
9018                                   struct intel_crtc_state *crtc_state)
9019 {
9020         struct drm_device *dev = crtc->base.dev;
9021         struct drm_i915_private *dev_priv = to_i915(dev);
9022         const struct intel_limit *limit;
9023         int refclk = 96000;
9024
9025         memset(&crtc_state->dpll_hw_state, 0,
9026                sizeof(crtc_state->dpll_hw_state));
9027
9028         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9029                 if (intel_panel_use_ssc(dev_priv)) {
9030                         refclk = dev_priv->vbt.lvds_ssc_freq;
9031                         drm_dbg_kms(&dev_priv->drm,
9032                                     "using SSC reference clock of %d kHz\n",
9033                                     refclk);
9034                 }
9035
9036                 limit = &pnv_limits_lvds;
9037         } else {
9038                 limit = &pnv_limits_sdvo;
9039         }
9040
9041         if (!crtc_state->clock_set &&
9042             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9043                                 refclk, NULL, &crtc_state->dpll)) {
9044                 drm_err(&dev_priv->drm,
9045                         "Couldn't find PLL settings for mode!\n");
9046                 return -EINVAL;
9047         }
9048
9049         i9xx_compute_dpll(crtc, crtc_state, NULL);
9050
9051         return 0;
9052 }
9053
9054 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
9055                                    struct intel_crtc_state *crtc_state)
9056 {
9057         struct drm_device *dev = crtc->base.dev;
9058         struct drm_i915_private *dev_priv = to_i915(dev);
9059         const struct intel_limit *limit;
9060         int refclk = 96000;
9061
9062         memset(&crtc_state->dpll_hw_state, 0,
9063                sizeof(crtc_state->dpll_hw_state));
9064
9065         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9066                 if (intel_panel_use_ssc(dev_priv)) {
9067                         refclk = dev_priv->vbt.lvds_ssc_freq;
9068                         drm_dbg_kms(&dev_priv->drm,
9069                                     "using SSC reference clock of %d kHz\n",
9070                                     refclk);
9071                 }
9072
9073                 limit = &intel_limits_i9xx_lvds;
9074         } else {
9075                 limit = &intel_limits_i9xx_sdvo;
9076         }
9077
9078         if (!crtc_state->clock_set &&
9079             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9080                                  refclk, NULL, &crtc_state->dpll)) {
9081                 drm_err(&dev_priv->drm,
9082                         "Couldn't find PLL settings for mode!\n");
9083                 return -EINVAL;
9084         }
9085
9086         i9xx_compute_dpll(crtc, crtc_state, NULL);
9087
9088         return 0;
9089 }
9090
9091 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
9092                                   struct intel_crtc_state *crtc_state)
9093 {
9094         int refclk = 100000;
9095         const struct intel_limit *limit = &intel_limits_chv;
9096         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
9097
9098         memset(&crtc_state->dpll_hw_state, 0,
9099                sizeof(crtc_state->dpll_hw_state));
9100
9101         if (!crtc_state->clock_set &&
9102             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9103                                 refclk, NULL, &crtc_state->dpll)) {
9104                 drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
9105                 return -EINVAL;
9106         }
9107
9108         chv_compute_dpll(crtc, crtc_state);
9109
9110         return 0;
9111 }
9112
9113 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
9114                                   struct intel_crtc_state *crtc_state)
9115 {
9116         int refclk = 100000;
9117         const struct intel_limit *limit = &intel_limits_vlv;
9118         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
9119
9120         memset(&crtc_state->dpll_hw_state, 0,
9121                sizeof(crtc_state->dpll_hw_state));
9122
9123         if (!crtc_state->clock_set &&
9124             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9125                                 refclk, NULL, &crtc_state->dpll)) {
9126                 drm_err(&i915->drm,  "Couldn't find PLL settings for mode!\n");
9127                 return -EINVAL;
9128         }
9129
9130         vlv_compute_dpll(crtc, crtc_state);
9131
9132         return 0;
9133 }
9134
9135 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
9136 {
9137         if (IS_I830(dev_priv))
9138                 return false;
9139
9140         return INTEL_GEN(dev_priv) >= 4 ||
9141                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
9142 }
9143
9144 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
9145                                  struct intel_crtc_state *pipe_config)
9146 {
9147         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9148         u32 tmp;
9149
9150         if (!i9xx_has_pfit(dev_priv))
9151                 return;
9152
9153         tmp = intel_de_read(dev_priv, PFIT_CONTROL);
9154         if (!(tmp & PFIT_ENABLE))
9155                 return;
9156
9157         /* Check whether the pfit is attached to our pipe. */
9158         if (INTEL_GEN(dev_priv) < 4) {
9159                 if (crtc->pipe != PIPE_B)
9160                         return;
9161         } else {
9162                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
9163                         return;
9164         }
9165
9166         pipe_config->gmch_pfit.control = tmp;
9167         pipe_config->gmch_pfit.pgm_ratios = intel_de_read(dev_priv,
9168                                                           PFIT_PGM_RATIOS);
9169 }
9170
/*
 * Read back the VLV DPLL divider dword via DPIO and compute the
 * resulting port clock into pipe_config->port_clock. Does nothing
 * when the DPLL is not enabled (DSI bypasses the DPLL entirely).
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        struct dpll clock;
        u32 mdiv;
        int refclk = 100000; /* fixed 100 MHz reference on VLV */

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);
        mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
        vlv_dpio_put(dev_priv);

        /* Unpack all divider fields from the single PLL dword. */
        clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
        clock.m2 = mdiv & DPIO_M2DIV_MASK;
        clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
        clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
        clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

        pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
9197
/*
 * Read out the primary plane's current hardware state (presumably the
 * framebuffer set up by firmware at boot -- TODO confirm) and describe
 * it in @plane_config so a matching framebuffer can be reconstructed.
 *
 * On success plane_config->fb owns a freshly allocated intel_framebuffer;
 * silently returns without filling plane_config if the plane is disabled
 * or the allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        enum pipe pipe;
        u32 val, base, offset;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;

        /* Nothing to read out if the plane is off. */
        if (!plane->get_hw_state(plane, &pipe))
                return;

        drm_WARN_ON(dev, pipe != crtc->pipe);

        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
                return;
        }

        fb = &intel_fb->base;

        fb->dev = dev;

        val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

        /* Tiling and rotation are only readable on gen4+. */
        if (INTEL_GEN(dev_priv) >= 4) {
                if (val & DISPPLANE_TILED) {
                        plane_config->tiling = I915_TILING_X;
                        fb->modifier = I915_FORMAT_MOD_X_TILED;
                }

                if (val & DISPPLANE_ROTATE_180)
                        plane_config->rotation = DRM_MODE_ROTATE_180;
        }

        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
            val & DISPPLANE_MIRROR)
                plane_config->rotation |= DRM_MODE_REFLECT_X;

        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->format = drm_format_info(fourcc);

        /* Surface base/offset registers differ per generation. */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
                base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                if (plane_config->tiling)
                        offset = intel_de_read(dev_priv,
                                               DSPTILEOFF(i9xx_plane));
                else
                        offset = intel_de_read(dev_priv,
                                               DSPLINOFF(i9xx_plane));
                base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
        } else {
                base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
        }
        plane_config->base = base;

        /* PIPESRC encodes (width - 1) << 16 | (height - 1). */
        val = intel_de_read(dev_priv, PIPESRC(pipe));
        fb->width = ((val >> 16) & 0xfff) + 1;
        fb->height = ((val >> 0) & 0xfff) + 1;

        val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
        fb->pitches[0] = val & 0xffffffc0;

        aligned_height = intel_fb_align_height(fb, 0, fb->height);

        plane_config->size = fb->pitches[0] * aligned_height;

        drm_dbg_kms(&dev_priv->drm,
                    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                    crtc->base.name, plane->base.name, fb->width, fb->height,
                    fb->format->cpp[0] * 8, base, fb->pitches[0],
                    plane_config->size);

        /* Ownership of intel_fb passes to the caller via plane_config. */
        plane_config->fb = intel_fb;
}
9283
/*
 * Read back the CHV DPLL divider registers via DPIO and compute the
 * resulting port clock into pipe_config->port_clock. Does nothing
 * when the DPLL is not enabled (DSI bypasses the DPLL entirely).
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        struct dpll clock;
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000; /* fixed 100 MHz reference on CHV */

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);
        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
        pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        vlv_dpio_put(dev_priv);

        /* m2 is a 22.22 style fixed point value when fractional divide is on. */
        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
        clock.m2 = (pll_dw0 & 0xff) << 22;
        if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
                clock.m2 |= pll_dw2 & 0x3fffff;
        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

        pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
9317
9318 static enum intel_output_format
9319 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
9320 {
9321         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9322         u32 tmp;
9323
9324         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
9325
9326         if (tmp & PIPEMISC_YUV420_ENABLE) {
9327                 /* We support 4:2:0 in full blend mode only */
9328                 drm_WARN_ON(&dev_priv->drm,
9329                             (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
9330
9331                 return INTEL_OUTPUT_FORMAT_YCBCR420;
9332         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
9333                 return INTEL_OUTPUT_FORMAT_YCBCR444;
9334         } else {
9335                 return INTEL_OUTPUT_FORMAT_RGB;
9336         }
9337 }
9338
9339 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
9340 {
9341         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9342         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9343         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9344         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
9345         u32 tmp;
9346
9347         tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
9348
9349         if (tmp & DISPPLANE_GAMMA_ENABLE)
9350                 crtc_state->gamma_enable = true;
9351
9352         if (!HAS_GMCH(dev_priv) &&
9353             tmp & DISPPLANE_PIPE_CSC_ENABLE)
9354                 crtc_state->csc_enable = true;
9355 }
9356
/*
 * Read out the full hardware state of a GMCH-era pipe into @pipe_config.
 *
 * Returns true and fills @pipe_config if the pipe's power domain is up
 * and the pipe is enabled; returns false otherwise. Holds a power
 * domain wakeref for the duration of the readout.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;
        u32 tmp;
        bool ret;

        /* Bail early if the pipe's power well is down; registers would be garbage. */
        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;

        ret = false;

        tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                goto out;

        /* Pipe BPC is only readable from PIPECONF on these platforms. */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                switch (tmp & PIPECONF_BPC_MASK) {
                case PIPECONF_6BPC:
                        pipe_config->pipe_bpp = 18;
                        break;
                case PIPECONF_8BPC:
                        pipe_config->pipe_bpp = 24;
                        break;
                case PIPECONF_10BPC:
                        pipe_config->pipe_bpp = 30;
                        break;
                default:
                        break;
                }
        }

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            (tmp & PIPECONF_COLOR_RANGE_SELECT))
                pipe_config->limited_color_range = true;

        pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
                PIPECONF_GAMMA_MODE_SHIFT;

        if (IS_CHERRYVIEW(dev_priv))
                pipe_config->cgm_mode = intel_de_read(dev_priv,
                                                      CGM_PIPE_MODE(crtc->pipe));

        i9xx_get_pipe_color_config(pipe_config);
        intel_color_get_config(pipe_config);

        if (INTEL_GEN(dev_priv) < 4)
                pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

        intel_get_pipe_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);

        i9xx_get_pfit_config(crtc, pipe_config);

        /* Pixel multiplier readout differs per generation. */
        if (INTEL_GEN(dev_priv) >= 4) {
                /* No way to read it out on pipes B and C */
                if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
                        tmp = dev_priv->chv_dpll_md[crtc->pipe];
                else
                        tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
                pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
                   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
                tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & SDVO_MULTIPLIER_MASK)
                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
        } else {
                /* Note that on i915G/GM the pixel multiplier is in the sdvo
                 * port and will be fixed up in the encoder->get_config
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
        pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
                                                        DPLL(crtc->pipe));
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
                                                               FP0(crtc->pipe));
                pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
                                                               FP1(crtc->pipe));
        } else {
                /* Mask out read-only status bits. */
                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
                                                     DPLL_PORTC_READY_MASK |
                                                     DPLL_PORTB_READY_MASK);
        }

        if (IS_CHERRYVIEW(dev_priv))
                chv_crtc_clock_get(crtc, pipe_config);
        else if (IS_VALLEYVIEW(dev_priv))
                vlv_crtc_clock_get(crtc, pipe_config);
        else
                i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * Normally the dotclock is filled in by the encoder .get_config()
         * but in case the pipe is enabled w/o any ports we need a sane
         * default.
         */
        pipe_config->hw.adjusted_mode.crtc_clock =
                pipe_config->port_clock / pipe_config->pixel_multiplier;

        ret = true;

out:
        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
9478
/*
 * Initialize the PCH display reference clock (PCH_DREF_CONTROL) on
 * Ironlake-class PCHs.
 *
 * The desired final register value is computed first from the connected
 * outputs (LVDS / CPU eDP), the CK505 clock source, SSC policy and any
 * DPLLs already using the SSC source; the register is then walked from
 * its current value to the final one in the careful source-by-source
 * enable/disable order the hardware requires (each step posted and
 * followed by a 200 us settle delay). If no change is needed, the
 * function returns without touching the hardware.
 */
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        int i;
        u32 val, final;
        bool has_lvds = false;
        bool has_cpu_edp = false;
        bool has_panel = false;
        bool has_ck505 = false;
        bool can_ssc = false;
        bool using_ssc_source = false;

        /* We need to take the global config into account */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        has_panel = true;
                        has_lvds = true;
                        break;
                case INTEL_OUTPUT_EDP:
                        has_panel = true;
                        if (encoder->port == PORT_A)
                                has_cpu_edp = true;
                        break;
                default:
                        break;
                }
        }

        /* On IBX the CK505 external clock chip is a VBT option; SSC is
         * only usable together with it. Other PCHs always allow SSC. */
        if (HAS_PCH_IBX(dev_priv)) {
                has_ck505 = dev_priv->vbt.display_clock_mode;
                can_ssc = has_ck505;
        } else {
                has_ck505 = false;
                can_ssc = true;
        }

        /* Check if any DPLLs are using the SSC source */
        for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
                u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

                if (!(temp & DPLL_VCO_ENABLE))
                        continue;

                if ((temp & PLL_REF_INPUT_MASK) ==
                    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
                        using_ssc_source = true;
                        break;
                }
        }

        drm_dbg_kms(&dev_priv->drm,
                    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
                    has_panel, has_lvds, has_ck505, using_ssc_source);

        /* Ironlake: try to setup display ref clock before DPLL
         * enabling. This is only under driver's control after
         * PCH B stepping, previous chipset stepping should be
         * ignoring this setting.
         */
        val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

        /* As we must carefully and slowly disable/enable each source in turn,
         * compute the final state we want first and check if we need to
         * make any changes at all.
         */
        final = val;
        final &= ~DREF_NONSPREAD_SOURCE_MASK;
        if (has_ck505)
                final |= DREF_NONSPREAD_CK505_ENABLE;
        else
                final |= DREF_NONSPREAD_SOURCE_ENABLE;

        final &= ~DREF_SSC_SOURCE_MASK;
        final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
        final &= ~DREF_SSC1_ENABLE;

        if (has_panel) {
                final |= DREF_SSC_SOURCE_ENABLE;

                if (intel_panel_use_ssc(dev_priv) && can_ssc)
                        final |= DREF_SSC1_ENABLE;

                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc)
                                final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        else
                                final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
        } else if (using_ssc_source) {
                /* No panel, but a DPLL still depends on SSC: keep it alive. */
                final |= DREF_SSC_SOURCE_ENABLE;
                final |= DREF_SSC1_ENABLE;
        }

        if (final == val)
                return;

        /* Always enable nonspread source */
        val &= ~DREF_NONSPREAD_SOURCE_MASK;

        if (has_ck505)
                val |= DREF_NONSPREAD_CK505_ENABLE;
        else
                val |= DREF_NONSPREAD_SOURCE_ENABLE;

        if (has_panel) {
                val &= ~DREF_SSC_SOURCE_MASK;
                val |= DREF_SSC_SOURCE_ENABLE;

                /* SSC must be turned on before enabling the CPU output  */
                if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                        drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
                        val |= DREF_SSC1_ENABLE;
                } else
                        val &= ~DREF_SSC1_ENABLE;

                /* Get SSC going before enabling the outputs */
                intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
                intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
                udelay(200);

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Enable CPU source on CPU attached eDP */
                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                                drm_dbg_kms(&dev_priv->drm,
                                            "Using SSC on eDP\n");
                                val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        } else
                                val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
                intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
                udelay(200);
        } else {
                drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Turn off CPU output */
                val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
                intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
                udelay(200);

                if (!using_ssc_source) {
                        drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

                        /* Turn off the SSC source */
                        val &= ~DREF_SSC_SOURCE_MASK;
                        val |= DREF_SSC_SOURCE_DISABLE;

                        /* Turn off SSC1 */
                        val &= ~DREF_SSC1_ENABLE;

                        intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
                        intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
                        udelay(200);
                }
        }

        /* The stepwise updates above must have converged on 'final'. */
        BUG_ON(val != final);
}
9647
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset bit,
 * wait (up to 100 us) for the status bit to confirm, then de-assert
 * and wait for it to clear. Timeouts are logged but not fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
        u32 tmp;

        /* Assert the mPHY reset. */
        tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
        tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
        intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

        if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
                        FDI_MPHY_IOSFSB_RESET_STATUS, 100))
                drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

        /* De-assert the mPHY reset. */
        tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
        tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
        intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

        if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
                         FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
                drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}
9668
9669 /* WaMPhyProgramming:hsw */
9670 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
9671 {
9672         u32 tmp;
9673
9674         tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
9675         tmp &= ~(0xFF << 24);
9676         tmp |= (0x12 << 24);
9677         intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
9678
9679         tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
9680         tmp |= (1 << 11);
9681         intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
9682
9683         tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
9684         tmp |= (1 << 11);
9685         intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
9686
9687         tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
9688         tmp |= (1 << 24) | (1 << 21) | (1 << 18);
9689         intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
9690
9691         tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
9692         tmp |= (1 << 24) | (1 << 21) | (1 << 18);
9693         intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
9694
9695         tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
9696         tmp &= ~(7 << 13);
9697         tmp |= (5 << 13);
9698         intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
9699
9700         tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
9701         tmp &= ~(7 << 13);
9702         tmp |= (5 << 13);
9703         intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
9704
9705         tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
9706         tmp &= ~0xFF;
9707         tmp |= 0x1C;
9708         intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
9709
9710         tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
9711         tmp &= ~0xFF;
9712         tmp |= 0x1C;
9713         intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
9714
9715         tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
9716         tmp &= ~(0xFF << 16);
9717         tmp |= (0x1C << 16);
9718         intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
9719
9720         tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
9721         tmp &= ~(0xFF << 16);
9722         tmp |= (0x1C << 16);
9723         intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
9724
9725         tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
9726         tmp |= (1 << 27);
9727         intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
9728
9729         tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
9730         tmp |= (1 << 27);
9731         intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
9732
9733         tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
9734         tmp &= ~(0xF << 28);
9735         tmp |= (4 << 28);
9736         intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
9737
9738         tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
9739         tmp &= ~(0xF << 28);
9740         tmp |= (4 << 28);
9741         intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
9742 }
9743
9744 /* Implements 3 different sequences from BSpec chapter "Display iCLK
9745  * Programming" based on the parameters passed:
9746  * - Sequence to enable CLKOUT_DP
9747  * - Sequence to enable CLKOUT_DP without spread
9748  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
9749  */
/*
 * Enable CLKOUT_DP, optionally with spread-spectrum, and optionally
 * set up the FDI mPHY. Sanity-checks the flag combinations (FDI needs
 * spread, LPT-LP has no FDI) before the SBI programming sequence.
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
                                 bool with_spread, bool with_fdi)
{
        u32 reg, tmp;

        /* Coerce invalid flag combinations with a warning rather than fail. */
        if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
                     "FDI requires downspread\n"))
                with_spread = true;
        if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
                     with_fdi, "LP PCH doesn't have FDI\n"))
                with_fdi = false;

        /* All SBI accesses below are serialized by sb_lock. */
        mutex_lock(&dev_priv->sb_lock);

        /* Un-disable the SSC control, keeping the path in bypass (PATHALT). */
        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        tmp &= ~SBI_SSCCTL_DISABLE;
        tmp |= SBI_SSCCTL_PATHALT;
        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

        udelay(24);

        if (with_spread) {
                /* Leave bypass to actually apply the spread. */
                tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
                tmp &= ~SBI_SSCCTL_PATHALT;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

                if (with_fdi) {
                        lpt_reset_fdi_mphy(dev_priv);
                        lpt_program_fdi_mphy(dev_priv);
                }
        }

        /* Finally enable the clock buffer (register differs on LPT-LP). */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}
9789
9790 /* Sequence to disable CLKOUT_DP */
/*
 * Disable CLKOUT_DP: turn off the clock buffer first, then -- if the
 * SSC control is still active -- route it through bypass (PATHALT)
 * before setting the disable bit, with the mandated 32 us settle time.
 */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
        u32 reg, tmp;

        mutex_lock(&dev_priv->sb_lock);

        /* Clock buffer off first (register differs on LPT-LP). */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        if (!(tmp & SBI_SSCCTL_DISABLE)) {
                /* Must be in bypass before disabling. */
                if (!(tmp & SBI_SSCCTL_PATHALT)) {
                        tmp |= SBI_SSCCTL_PATHALT;
                        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
                        udelay(32);
                }
                tmp |= SBI_SSCCTL_DISABLE;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
        }

        mutex_unlock(&dev_priv->sb_lock);
}
9815
/* Map a bend amount in steps (-50..50, multiples of 5) to a table index 0..20. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI_SSCDIVINTPHASE values for each supported CLKOUT_DP bend amount,
 * indexed via BEND_IDX(). Values are hardware magic (presumably from
 * BSpec -- not decoded further here).
 */
static const u16 sscdivintphase[] = {
        [BEND_IDX( 50)] = 0x3B23,
        [BEND_IDX( 45)] = 0x3B23,
        [BEND_IDX( 40)] = 0x3C23,
        [BEND_IDX( 35)] = 0x3C23,
        [BEND_IDX( 30)] = 0x3D23,
        [BEND_IDX( 25)] = 0x3D23,
        [BEND_IDX( 20)] = 0x3E23,
        [BEND_IDX( 15)] = 0x3E23,
        [BEND_IDX( 10)] = 0x3F23,
        [BEND_IDX(  5)] = 0x3F23,
        [BEND_IDX(  0)] = 0x0025,
        [BEND_IDX( -5)] = 0x0025,
        [BEND_IDX(-10)] = 0x0125,
        [BEND_IDX(-15)] = 0x0125,
        [BEND_IDX(-20)] = 0x0225,
        [BEND_IDX(-25)] = 0x0225,
        [BEND_IDX(-30)] = 0x0325,
        [BEND_IDX(-35)] = 0x0325,
        [BEND_IDX(-40)] = 0x0425,
        [BEND_IDX(-45)] = 0x0425,
        [BEND_IDX(-50)] = 0x0525,
};
9841
/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 *
 * Programs SBI_SSCDITHPHASE and SBI_SSCDIVINTPHASE over the sideband
 * interface; out-of-range or non-multiple-of-5 step values are rejected
 * with a WARN and no register access.
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	u32 tmp;
	int idx = BEND_IDX(steps);

	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
		return;

	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	/* SBI accesses are serialized under sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Half-steps (odd multiples of 5) need the dither phase pattern. */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	/* Read-modify-write only the low 16 bits from the bend table. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX
9876
/*
 * Check whether the SPLL is currently enabled and sourcing its
 * reference from the PCH SSC clock.
 */
static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);

	if ((ctl & SPLL_PLL_ENABLE) == 0)
		return false;

	/* Muxed reference resolves to PCH SSC when the CPU SSC fuse is clear. */
	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	/* BDW has an explicit PCH SSC reference selection. */
	if (IS_BROADWELL(dev_priv) &&
	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
		return true;

	return false;
}
9895
/*
 * Check whether the given WRPLL is currently enabled and sourcing its
 * reference from the PCH SSC clock.
 */
static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
			       enum intel_dpll_id id)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));

	if ((ctl & WRPLL_PLL_ENABLE) == 0)
		return false;

	/* Explicit PCH SSC reference selection. */
	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
		return true;

	/*
	 * On BDW/HSW-ULT the muxed reference resolves to PCH SSC
	 * when the CPU SSC fuse is clear.
	 */
	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	return false;
}
9915
9916 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9917 {
9918         struct intel_encoder *encoder;
9919         bool has_fdi = false;
9920
9921         for_each_intel_encoder(&dev_priv->drm, encoder) {
9922                 switch (encoder->type) {
9923                 case INTEL_OUTPUT_ANALOG:
9924                         has_fdi = true;
9925                         break;
9926                 default:
9927                         break;
9928                 }
9929         }
9930
9931         /*
9932          * The BIOS may have decided to use the PCH SSC
9933          * reference so we must not disable it until the
9934          * relevant PLLs have stopped relying on it. We'll
9935          * just leave the PCH SSC reference enabled in case
9936          * any active PLL is using it. It will get disabled
9937          * after runtime suspend if we don't have FDI.
9938          *
9939          * TODO: Move the whole reference clock handling
9940          * to the modeset sequence proper so that we can
9941          * actually enable/disable/reconfigure these things
9942          * safely. To do that we need to introduce a real
9943          * clock hierarchy. That would also allow us to do
9944          * clock bending finally.
9945          */
9946         dev_priv->pch_ssc_use = 0;
9947
9948         if (spll_uses_pch_ssc(dev_priv)) {
9949                 drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
9950                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
9951         }
9952
9953         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9954                 drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
9955                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
9956         }
9957
9958         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9959                 drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
9960                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
9961         }
9962
9963         if (dev_priv->pch_ssc_use)
9964                 return;
9965
9966         if (has_fdi) {
9967                 lpt_bend_clkout_dp(dev_priv, 0);
9968                 lpt_enable_clkout_dp(dev_priv, true, true);
9969         } else {
9970                 lpt_disable_clkout_dp(dev_priv);
9971         }
9972 }
9973
/*
 * Initialize reference clocks when the driver loads.
 *
 * Dispatches on the (mutually exclusive) PCH type; other PCH
 * generations need no reference clock setup here.
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT(dev_priv)) {
		lpt_init_pch_refclk(dev_priv);
		return;
	}

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		ilk_init_pch_refclk(dev_priv);
}
9984
/*
 * Program the PIPECONF register for an ILK-family pipe from the given
 * crtc state: bpc, dithering, interlace mode, color range/space, gamma
 * mode and frame start delay.
 */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	if (crtc_state->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	/* Non-RGB output here means a YCbCr format (BT.709 encoding). */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	val |= PIPECONF_FRAME_START_DELAY(0);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	/* Posting read to make sure the write has landed. */
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
10040
/*
 * Program the PIPECONF register for a HSW+ transcoder. Only dithering
 * (HSW), interlace mode and the HSW YUV colorspace bit live here; bpc
 * and the rest moved to other registers on this platform.
 */
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	if (IS_HASWELL(dev_priv) && crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/* On HSW the YUV colorspace selection is still in PIPECONF. */
	if (IS_HASWELL(dev_priv) &&
	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;

	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
	/* Posting read to make sure the write has landed. */
	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
}
10063
/*
 * Program the PIPEMISC register (BDW+): dither bpc/enable, YUV output
 * selection (including 4:2:0 full blend), HDR precision and gen12
 * pixel rounding.
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_DITHER_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_DITHER_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_DITHER_10_BPC;
		break;
	case 36:
		val |= PIPEMISC_DITHER_12_BPC;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	/* 4:2:0 additionally needs the pipe-internal chroma downsampler. */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	/*
	 * Gen11+: use higher precision when only HDR-capable planes
	 * (and the cursor) are active.
	 */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
					   BIT(PLANE_CURSOR))) == 0)
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (INTEL_GEN(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
10109
/*
 * Read back the pipe bpp from the PIPEMISC dither-bpc field.
 *
 * Returns the pipe bpp in bits (18/24/30/36), or 0 for an
 * unrecognized register value (with a MISSING_CASE warning).
 */
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
	case PIPEMISC_DITHER_6_BPC:
		return 18;
	case PIPEMISC_DITHER_8_BPC:
		return 24;
	case PIPEMISC_DITHER_10_BPC:
		return 30;
	case PIPEMISC_DITHER_12_BPC:
		return 36;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}
10131
10132 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
10133 {
10134         /*
10135          * Account for spread spectrum to avoid
10136          * oversubscribing the link. Max center spread
10137          * is 2.5%; use 5% for safety's sake.
10138          */
10139         u32 bps = target_clock * bpp * 21 / 20;
10140         return DIV_ROUND_UP(bps, link_bw * 8);
10141 }
10142
10143 static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
10144 {
10145         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
10146 }
10147
/*
 * Compute the ILK-family DPLL register values (DPLL, FP0, FP1) from the
 * crtc state and the optional reduced (downclocked) clock, storing the
 * results in crtc_state->dpll_hw_state. No hardware is touched here.
 */
static void ilk_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *crtc_state,
			     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		/* Same CB tune criterion, applied to the reduced clock. */
		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		/* No reduced clock: FP1 mirrors FP0. */
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
10249
/*
 * Compute the PLL dividers for an ILK-family pipe and reserve a shared
 * DPLL for it.
 *
 * Returns 0 on success (including the CPU eDP case that needs no PCH
 * PLL), -EINVAL if no suitable dividers or no free DPLL can be found.
 */
static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	const struct intel_limit *limit;
	int refclk = 120000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	/* Pick divider limits based on output type and reference clock. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		if (intel_is_dual_link_lvds(dev_priv)) {
			if (refclk == 100000)
				limit = &ilk_limits_dual_lvds_100m;
			else
				limit = &ilk_limits_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &ilk_limits_single_lvds_100m;
			else
				limit = &ilk_limits_single_lvds;
		}
	} else {
		limit = &ilk_limits_dac;
	}

	/* Respect user-forced dividers (clock_set); otherwise search. */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		drm_err(&dev_priv->drm,
			"Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ilk_compute_dpll(crtc, crtc_state, NULL);

	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to find PLL for pipe %c\n",
			    pipe_name(crtc->pipe));
		return -EINVAL;
	}

	return 0;
}
10308
/*
 * Read the PCH transcoder link M1/N1 and data M1/N1 values for the
 * given CRTC into *m_n; the TU size is extracted from the data M
 * register's high bits.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
	/* TU size is stored as (tu - 1) in the data M register. */
	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
10324
/*
 * Read the CPU transcoder link/data M/N values into *m_n, plus the
 * second set (M2/N2) into *m2_n2 when requested and supported. Pre-gen5
 * hardware uses the per-pipe G4X register layout instead.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = intel_de_read(dev_priv,
					    PIPE_LINK_M1(transcoder));
		m_n->link_n = intel_de_read(dev_priv,
					    PIPE_LINK_N1(transcoder));
		m_n->gmch_m = intel_de_read(dev_priv,
					    PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv,
					    PIPE_DATA_N1(transcoder));
		/* TU size is stored as (tu - 1) in the data M register. */
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = intel_de_read(dev_priv,
						      PIPE_LINK_M2(transcoder));
			m2_n2->link_n = intel_de_read(dev_priv,
						      PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = intel_de_read(dev_priv,
						      PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = intel_de_read(dev_priv,
						      PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
				      & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
10369
10370 void intel_dp_get_m_n(struct intel_crtc *crtc,
10371                       struct intel_crtc_state *pipe_config)
10372 {
10373         if (pipe_config->has_pch_encoder)
10374                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
10375         else
10376                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
10377                                              &pipe_config->dp_m_n,
10378                                              &pipe_config->dp_m2_n2);
10379 }
10380
/* Read the FDI link M/N values from the CPU transcoder (no M2/N2 set). */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
10387
/*
 * Read back the SKL+ panel fitter (pipe scaler) state: find the first
 * enabled scaler bound to the pipe (not to a plane), record its
 * position/size and mark it in use; update scaler_id/scaler_users
 * accordingly.
 */
static void skl_get_pfit_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
	u32 ps_ctrl = 0;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		ps_ctrl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		/* Pipe scaling: enabled with no plane selected. */
		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
			id = i;
			pipe_config->pch_pfit.enabled = true;
			pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
								  SKL_PS_WIN_POS(crtc->pipe, i));
			pipe_config->pch_pfit.size = intel_de_read(dev_priv,
								   SKL_PS_WIN_SZ(crtc->pipe, i));
			scaler_state->scalers[i].in_use = true;
			break;
		}
	}

	scaler_state->scaler_id = id;
	if (id >= 0) {
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	} else {
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}
}
10420
/*
 * Read back the BIOS-programmed state of the primary plane on SKL+ and
 * build an intel_framebuffer describing it, so the boot framebuffer can
 * be taken over without a modeset. On any failure the plane config is
 * simply left without an fb.
 */
static void
skl_get_initial_plane_config(struct intel_crtc *crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane is off. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	drm_WARN_ON(dev, pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));

	/* The pixel format field width changed on gen11. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* Alpha mode moved to PLANE_COLOR_CTL on GLK/gen10+. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = intel_de_read(dev_priv,
				      PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Translate the hardware tiling bits into a DRM format modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
				I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
				I915_FORMAT_MOD_Y_TILED_CCS;
		else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* Surface base address is 4K aligned; mask off the low bits. */
	base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores (height - 1) << 16 | (width - 1). */
	val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xffff) + 1;
	fb->width = ((val >> 0) & 0xffff) + 1;

	/* Stride register is in units depending on format/modifier. */
	val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	drm_dbg_kms(&dev_priv->drm,
		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		    crtc->base.name, plane->base.name, fb->width, fb->height,
		    fb->format->cpp[0] * 8, base, fb->pitches[0],
		    plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
10553
/*
 * Read back the ILK-family panel fitter state (enable, window position
 * and size) for the given CRTC into the pipe config.
 */
static void ilk_get_pfit_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
							  PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = intel_de_read(dev_priv,
							   PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignements of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now. */
		if (IS_GEN(dev_priv, 7)) {
			drm_WARN_ON(dev, (tmp & PF_PIPE_SEL_MASK_IVB) !=
				    PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}
10579
10580 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
10581                                 struct intel_crtc_state *pipe_config)
10582 {
10583         struct drm_device *dev = crtc->base.dev;
10584         struct drm_i915_private *dev_priv = to_i915(dev);
10585         enum intel_display_power_domain power_domain;
10586         intel_wakeref_t wakeref;
10587         u32 tmp;
10588         bool ret;
10589
10590         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10591         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10592         if (!wakeref)
10593                 return false;
10594
10595         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10596         pipe_config->shared_dpll = NULL;
10597
10598         ret = false;
10599         tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
10600         if (!(tmp & PIPECONF_ENABLE))
10601                 goto out;
10602
10603         switch (tmp & PIPECONF_BPC_MASK) {
10604         case PIPECONF_6BPC:
10605                 pipe_config->pipe_bpp = 18;
10606                 break;
10607         case PIPECONF_8BPC:
10608                 pipe_config->pipe_bpp = 24;
10609                 break;
10610         case PIPECONF_10BPC:
10611                 pipe_config->pipe_bpp = 30;
10612                 break;
10613         case PIPECONF_12BPC:
10614                 pipe_config->pipe_bpp = 36;
10615                 break;
10616         default:
10617                 break;
10618         }
10619
10620         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
10621                 pipe_config->limited_color_range = true;
10622
10623         switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
10624         case PIPECONF_OUTPUT_COLORSPACE_YUV601:
10625         case PIPECONF_OUTPUT_COLORSPACE_YUV709:
10626                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10627                 break;
10628         default:
10629                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10630                 break;
10631         }
10632
10633         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
10634                 PIPECONF_GAMMA_MODE_SHIFT;
10635
10636         pipe_config->csc_mode = intel_de_read(dev_priv,
10637                                               PIPE_CSC_MODE(crtc->pipe));
10638
10639         i9xx_get_pipe_color_config(pipe_config);
10640         intel_color_get_config(pipe_config);
10641
10642         if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
10643                 struct intel_shared_dpll *pll;
10644                 enum intel_dpll_id pll_id;
10645
10646                 pipe_config->has_pch_encoder = true;
10647
10648                 tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
10649                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10650                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
10651
10652                 ilk_get_fdi_m_n_config(crtc, pipe_config);
10653
10654                 if (HAS_PCH_IBX(dev_priv)) {
10655                         /*
10656                          * The pipe->pch transcoder and pch transcoder->pll
10657                          * mapping is fixed.
10658                          */
10659                         pll_id = (enum intel_dpll_id) crtc->pipe;
10660                 } else {
10661                         tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
10662                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10663                                 pll_id = DPLL_ID_PCH_PLL_B;
10664                         else
10665                                 pll_id= DPLL_ID_PCH_PLL_A;
10666                 }
10667
10668                 pipe_config->shared_dpll =
10669                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
10670                 pll = pipe_config->shared_dpll;
10671
10672                 drm_WARN_ON(dev, !pll->info->funcs->get_hw_state(dev_priv, pll,
10673                                                  &pipe_config->dpll_hw_state));
10674
10675                 tmp = pipe_config->dpll_hw_state.dpll;
10676                 pipe_config->pixel_multiplier =
10677                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10678                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10679
10680                 ilk_pch_clock_get(crtc, pipe_config);
10681         } else {
10682                 pipe_config->pixel_multiplier = 1;
10683         }
10684
10685         intel_get_pipe_timings(crtc, pipe_config);
10686         intel_get_pipe_src_size(crtc, pipe_config);
10687
10688         ilk_get_pfit_config(crtc, pipe_config);
10689
10690         ret = true;
10691
10692 out:
10693         intel_display_power_put(dev_priv, power_domain, wakeref);
10694
10695         return ret;
10696 }
10697
10698 static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
10699                                   struct intel_crtc_state *crtc_state)
10700 {
10701         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10702         struct intel_atomic_state *state =
10703                 to_intel_atomic_state(crtc_state->uapi.state);
10704
10705         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10706             INTEL_GEN(dev_priv) >= 11) {
10707                 struct intel_encoder *encoder =
10708                         intel_get_crtc_new_encoder(state, crtc_state);
10709
10710                 if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10711                         drm_dbg_kms(&dev_priv->drm,
10712                                     "failed to find PLL for pipe %c\n",
10713                                     pipe_name(crtc->pipe));
10714                         return -EINVAL;
10715                 }
10716         }
10717
10718         return 0;
10719 }
10720
10721 static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
10722                             struct intel_crtc_state *pipe_config)
10723 {
10724         enum intel_dpll_id id;
10725         u32 temp;
10726
10727         temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
10728         id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
10729
10730         if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
10731                 return;
10732
10733         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10734 }
10735
/*
 * Determine which DPLL feeds @port on ICL+ and record it as the active
 * port DPLL in @pipe_config->icl_port_dplls.
 *
 * Combo PHY ports read the selection from ICL_DPCLKA_CFGCR0; Type-C
 * ports use either the MG PHY PLL or the TBT PLL depending on the
 * DDI_CLK_SEL value.
 */
static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);
	enum icl_port_dpll_id port_dpll_id;
	enum intel_dpll_id id;
	u32 temp;

	if (intel_phy_is_combo(dev_priv, phy)) {
		temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) &
			ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
		id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	} else if (intel_phy_is_tc(dev_priv, phy)) {
		u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
		} else {
			/* Anything else must be one of the TBT selections. */
			drm_WARN_ON(&dev_priv->drm,
				    clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
		}
	} else {
		drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
		return;
	}

	pipe_config->icl_port_dplls[port_dpll_id].pll =
		intel_get_shared_dpll_by_id(dev_priv, id);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
10772
10773 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10774                                 enum port port,
10775                                 struct intel_crtc_state *pipe_config)
10776 {
10777         enum intel_dpll_id id;
10778
10779         switch (port) {
10780         case PORT_A:
10781                 id = DPLL_ID_SKL_DPLL0;
10782                 break;
10783         case PORT_B:
10784                 id = DPLL_ID_SKL_DPLL1;
10785                 break;
10786         case PORT_C:
10787                 id = DPLL_ID_SKL_DPLL2;
10788                 break;
10789         default:
10790                 drm_err(&dev_priv->drm, "Incorrect port type\n");
10791                 return;
10792         }
10793
10794         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10795 }
10796
10797 static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
10798                             struct intel_crtc_state *pipe_config)
10799 {
10800         enum intel_dpll_id id;
10801         u32 temp;
10802
10803         temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
10804         id = temp >> (port * 3 + 1);
10805
10806         if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
10807                 return;
10808
10809         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10810 }
10811
/*
 * Translate the PORT_CLK_SEL selection of @port (HSW/BDW) into a shared
 * DPLL id and record the PLL in @pipe_config. PORT_CLK_SEL_NONE means
 * no clock is attached, in which case shared_dpll is left untouched.
 */
static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		/* Unknown selections are logged, then treated like NONE. */
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10846
/*
 * Determine which CPU transcoder drives @crtc and whether its pipe is
 * enabled.
 *
 * The eDP and (gen11+) DSI "panel" transcoders have a programmable pipe
 * mapping read from TRANS_DDI_FUNC_CTL; all other transcoders map 1:1
 * to their pipe. The transcoder's power domain reference is taken and
 * recorded in @wakerefs / @power_domain_mask for the caller to release.
 *
 * Returns true if the selected transcoder's pipe is enabled; false if
 * it is disabled or its power domain is off.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask,
				     intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	unsigned long panel_transcoder_mask = 0;
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	intel_wakeref_t wf;
	u32 tmp;

	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP))
		panel_transcoder_mask |= BIT(TRANSCODER_EDP);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_set_bit(panel_transcoder,
			 &panel_transcoder_mask,
			 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Map the transcoder's EDP_INPUT selection to a pipe. */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(panel_transcoder));
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	/* The caller must not already hold this transcoder's domain. */
	drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	wakerefs[power_domain] = wf;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
10947
/*
 * Check whether @crtc is driven by one of the BXT DSI transcoders
 * (ports A and C). Power domain references taken along the way are
 * recorded in @wakerefs / @power_domain_mask for the caller to release.
 *
 * Returns true if @pipe_config->cpu_transcoder was set to a DSI
 * transcoder bound to this pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask,
					 intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum transcoder cpu_transcoder;
	intel_wakeref_t wf;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		/* DSI port A uses transcoder DSI A, port C transcoder DSI C. */
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));

		wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
		if (!wf)
			continue;

		wakerefs[power_domain] = wf;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Only claim the transcoder if it is wired to our pipe. */
		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
11002
/*
 * Read out which DDI port and shared DPLL feed the already-determined
 * CPU transcoder of @crtc, and detect the HSW/BDW FDI (DDI E -> PCH)
 * special case.
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_shared_dpll *pll;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		/* DSI transcoders have a fixed port mapping. */
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
						PORT_A : PORT_B;
	} else {
		/* Everything else reads the port from TRANS_DDI_FUNC_CTL. */
		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (INTEL_GEN(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	/* Platform-specific port->PLL readout. */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		hsw_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		drm_WARN_ON(&dev_priv->drm,
			    !pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder (A), which is connected
	 * to DDI E. So just check whether this pipe is wired to DDI E and
	 * whether the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);
	}
}
11058
/*
 * Read out the full hardware state of @crtc on HSW+ platforms into
 * @pipe_config: transcoder selection, port/PLL state, timings, output
 * format, color management, linetime watermarks, panel fitter, IPS and
 * pixel multiplier. All power domain references taken during readout
 * are released before returning.
 *
 * Returns true if the pipe is active.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;
	u32 tmp;

	pipe_config->master_transcoder = INVALID_TRANSCODER;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	wakerefs[power_domain] = wf;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config,
					  &power_domain_mask, wakerefs);

	/* On BXT/GLK the pipe may instead be driven by a DSI transcoder. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config,
					 &power_domain_mask, wakerefs)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	if (!active)
		goto out;

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    INTEL_GEN(dev_priv) >= 11) {
		hsw_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	if (IS_HASWELL(dev_priv)) {
		/* HSW reports the output colorspace in PIPECONF. */
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);

		/*
		 * Currently there is no interface defined to
		 * check user preference between RGB/YCBCR444
		 * or YCBCR420. So the only possible case for
		 * YCBCR444 usage is driving YCBCR420 output
		 * with LSPCON, when pipe is configured for
		 * YCBCR444 output and LSPCON takes care of
		 * downsampling it.
		 */
		pipe_config->lspcon_downsampling =
			pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	if (INTEL_GEN(dev_priv) >= 9) {
		/* Gen9+ report gamma/CSC enables via SKL_BOTTOM_COLOR. */
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	/* The panel fitter lives in its own power domain. */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wf) {
		wakerefs[power_domain] = wf;
		power_domain_mask |= BIT_ULL(power_domain);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_get_pfit_config(crtc, pipe_config);
		else
			ilk_get_pfit_config(crtc, pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	/* Drop every power reference taken during readout. */
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv,
					power_domain, wakerefs[power_domain]);

	return active;
}
11197
/*
 * Return the address to program into the cursor base register: the
 * physical DMA address on platforms that need physical cursors,
 * otherwise the GGTT offset, plus the surface offset within the FB.
 */
static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 base;

	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
		base = sg_dma_address(obj->mm.pages->sgl);
	else
		base = intel_plane_ggtt_offset(plane_state);

	return base + plane_state->color_plane[0].offset;
}
11213
11214 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
11215 {
11216         int x = plane_state->uapi.dst.x1;
11217         int y = plane_state->uapi.dst.y1;
11218         u32 pos = 0;
11219
11220         if (x < 0) {
11221                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
11222                 x = -x;
11223         }
11224         pos |= x << CURSOR_X_SHIFT;
11225
11226         if (y < 0) {
11227                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
11228                 y = -y;
11229         }
11230         pos |= y << CURSOR_Y_SHIFT;
11231
11232         return pos;
11233 }
11234
11235 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
11236 {
11237         const struct drm_mode_config *config =
11238                 &plane_state->uapi.plane->dev->mode_config;
11239         int width = drm_rect_width(&plane_state->uapi.dst);
11240         int height = drm_rect_height(&plane_state->uapi.dst);
11241
11242         return width > 0 && width <= config->cursor_width &&
11243                 height > 0 && height <= config->cursor_height;
11244 }
11245
/*
 * Compute the cursor surface offset and final src coordinates for
 * @plane_state.
 *
 * Cursors can't be panned within their FB, so after offset alignment
 * the residual src_x/src_y must be zero. On GMCH platforms a 180°
 * rotation is realized by pointing the base at the FB's last pixel.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	unsigned int rotation = plane_state->hw.rotation;
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* Nothing further to compute for an invisible plane. */
	if (!plane_state->uapi.visible)
		return 0;

	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	if (src_x != 0 || src_y != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* ILK+ do this automagically in hardware */
	if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		offset += (src_h * src_w - 1) * fb->format->cpp[0];
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
11297
/*
 * Common cursor plane checks shared by all platforms: linear FBs only,
 * no scaling, unclipped dst programming, then surface/offset and src
 * coordinate validation.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
			      struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	int ret;

	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Use the unclipped src/dst rectangles, which we program to hw */
	plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
	plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);

	ret = intel_cursor_check_surface(plane_state);
	if (ret)
		return ret;

	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	return 0;
}
11335
/* Maximum cursor stride in bytes on 845g/865g, independent of format/rotation. */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
                       u32 pixel_format, u64 modifier,
                       unsigned int rotation)
{
        return 2048;
}
11343
11344 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11345 {
11346         u32 cntl = 0;
11347
11348         if (crtc_state->gamma_enable)
11349                 cntl |= CURSOR_GAMMA_ENABLE;
11350
11351         return cntl;
11352 }
11353
/*
 * Cursor control value for 845g/865g: always ARGB format with an
 * explicit stride taken from the precomputed plane state.
 */
static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
                           const struct intel_plane_state *plane_state)
{
        return CURSOR_ENABLE |
                CURSOR_FORMAT_ARGB |
                CURSOR_STRIDE(plane_state->color_plane[0].stride);
}
11361
11362 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
11363 {
11364         int width = drm_rect_width(&plane_state->uapi.dst);
11365
11366         /*
11367          * 845g/865g are only limited by the width of their cursors,
11368          * the height is arbitrary up to the precision of the register.
11369          */
11370         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
11371 }
11372
11373 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
11374                              struct intel_plane_state *plane_state)
11375 {
11376         const struct drm_framebuffer *fb = plane_state->hw.fb;
11377         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
11378         int ret;
11379
11380         ret = intel_check_cursor(crtc_state, plane_state);
11381         if (ret)
11382                 return ret;
11383
11384         /* if we want to turn off the cursor ignore width and height */
11385         if (!fb)
11386                 return 0;
11387
11388         /* Check for which cursor types we support */
11389         if (!i845_cursor_size_ok(plane_state)) {
11390                 drm_dbg_kms(&i915->drm,
11391                             "Cursor dimension %dx%d not supported\n",
11392                             drm_rect_width(&plane_state->uapi.dst),
11393                             drm_rect_height(&plane_state->uapi.dst));
11394                 return -EINVAL;
11395         }
11396
11397         drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
11398                     plane_state->color_plane[0].stride != fb->pitches[0]);
11399
11400         switch (fb->pitches[0]) {
11401         case 256:
11402         case 512:
11403         case 1024:
11404         case 2048:
11405                 break;
11406         default:
11407                  drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
11408                              fb->pitches[0]);
11409                 return -EINVAL;
11410         }
11411
11412         plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
11413
11414         return 0;
11415 }
11416
/*
 * Program (or disable, when @plane_state is NULL/invisible) the
 * 845g/865g cursor. All register writes happen under the uncore lock.
 */
static void i845_update_cursor(struct intel_plane *plane,
                               const struct intel_crtc_state *crtc_state,
                               const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        u32 cntl = 0, base = 0, pos = 0, size = 0;
        unsigned long irqflags;

        /* All-zero register values (the defaults above) disable the cursor. */
        if (plane_state && plane_state->uapi.visible) {
                unsigned int width = drm_rect_width(&plane_state->uapi.dst);
                unsigned int height = drm_rect_height(&plane_state->uapi.dst);

                cntl = plane_state->ctl |
                        i845_cursor_ctl_crtc(crtc_state);

                /* CURSIZE packs height in bits 12+, width in the low bits. */
                size = (height << 12) | width;

                base = intel_cursor_base(plane_state);
                pos = intel_cursor_position(plane_state);
        }

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* On these chipsets we can only modify the base/size/stride
         * whilst the cursor is disabled.
         */
        if (plane->cursor.base != base ||
            plane->cursor.size != size ||
            plane->cursor.cntl != cntl) {
                /* Disable first, rewrite everything, then re-enable. */
                intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
                intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
                intel_de_write_fw(dev_priv, CURSIZE, size);
                intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
                intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);

                /* Cache what we wrote so position-only updates can skip this. */
                plane->cursor.base = base;
                plane->cursor.size = size;
                plane->cursor.cntl = cntl;
        } else {
                /* Only the position changed; a CURPOS write suffices. */
                intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
        }

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11461
/* Turn the 845g/865g cursor off by programming it with a NULL plane state. */
static void i845_disable_cursor(struct intel_plane *plane,
                                const struct intel_crtc_state *crtc_state)
{
        i845_update_cursor(plane, crtc_state, NULL);
}
11467
11468 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
11469                                      enum pipe *pipe)
11470 {
11471         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11472         enum intel_display_power_domain power_domain;
11473         intel_wakeref_t wakeref;
11474         bool ret;
11475
11476         power_domain = POWER_DOMAIN_PIPE(PIPE_A);
11477         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11478         if (!wakeref)
11479                 return false;
11480
11481         ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
11482
11483         *pipe = PIPE_A;
11484
11485         intel_display_power_put(dev_priv, power_domain, wakeref);
11486
11487         return ret;
11488 }
11489
/*
 * Maximum cursor stride in bytes: max cursor width times 4 bytes per
 * pixel (the i9xx cursor modes programmed below are all ARGB).
 */
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
                       u32 pixel_format, u64 modifier,
                       unsigned int rotation)
{
        return plane->base.dev->mode_config.cursor_width * 4;
}
11497
11498 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11499 {
11500         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11501         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11502         u32 cntl = 0;
11503
11504         if (INTEL_GEN(dev_priv) >= 11)
11505                 return cntl;
11506
11507         if (crtc_state->gamma_enable)
11508                 cntl = MCURSOR_GAMMA_ENABLE;
11509
11510         if (crtc_state->csc_enable)
11511                 cntl |= MCURSOR_PIPE_CSC_ENABLE;
11512
11513         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11514                 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
11515
11516         return cntl;
11517 }
11518
11519 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
11520                            const struct intel_plane_state *plane_state)
11521 {
11522         struct drm_i915_private *dev_priv =
11523                 to_i915(plane_state->uapi.plane->dev);
11524         u32 cntl = 0;
11525
11526         if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
11527                 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
11528
11529         switch (drm_rect_width(&plane_state->uapi.dst)) {
11530         case 64:
11531                 cntl |= MCURSOR_MODE_64_ARGB_AX;
11532                 break;
11533         case 128:
11534                 cntl |= MCURSOR_MODE_128_ARGB_AX;
11535                 break;
11536         case 256:
11537                 cntl |= MCURSOR_MODE_256_ARGB_AX;
11538                 break;
11539         default:
11540                 MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
11541                 return 0;
11542         }
11543
11544         if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
11545                 cntl |= MCURSOR_ROTATE_180;
11546
11547         return cntl;
11548 }
11549
11550 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
11551 {
11552         struct drm_i915_private *dev_priv =
11553                 to_i915(plane_state->uapi.plane->dev);
11554         int width = drm_rect_width(&plane_state->uapi.dst);
11555         int height = drm_rect_height(&plane_state->uapi.dst);
11556
11557         if (!intel_cursor_size_ok(plane_state))
11558                 return false;
11559
11560         /* Cursor width is limited to a few power-of-two sizes */
11561         switch (width) {
11562         case 256:
11563         case 128:
11564         case 64:
11565                 break;
11566         default:
11567                 return false;
11568         }
11569
11570         /*
11571          * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
11572          * height from 8 lines up to the cursor width, when the
11573          * cursor is not rotated. Everything else requires square
11574          * cursors.
11575          */
11576         if (HAS_CUR_FBC(dev_priv) &&
11577             plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
11578                 if (height < 8 || height > width)
11579                         return false;
11580         } else {
11581                 if (height != width)
11582                         return false;
11583         }
11584
11585         return true;
11586 }
11587
11588 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
11589                              struct intel_plane_state *plane_state)
11590 {
11591         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
11592         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11593         const struct drm_framebuffer *fb = plane_state->hw.fb;
11594         enum pipe pipe = plane->pipe;
11595         int ret;
11596
11597         ret = intel_check_cursor(crtc_state, plane_state);
11598         if (ret)
11599                 return ret;
11600
11601         /* if we want to turn off the cursor ignore width and height */
11602         if (!fb)
11603                 return 0;
11604
11605         /* Check for which cursor types we support */
11606         if (!i9xx_cursor_size_ok(plane_state)) {
11607                 drm_dbg(&dev_priv->drm,
11608                         "Cursor dimension %dx%d not supported\n",
11609                         drm_rect_width(&plane_state->uapi.dst),
11610                         drm_rect_height(&plane_state->uapi.dst));
11611                 return -EINVAL;
11612         }
11613
11614         drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
11615                     plane_state->color_plane[0].stride != fb->pitches[0]);
11616
11617         if (fb->pitches[0] !=
11618             drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
11619                 drm_dbg_kms(&dev_priv->drm,
11620                             "Invalid cursor stride (%u) (cursor width %d)\n",
11621                             fb->pitches[0],
11622                             drm_rect_width(&plane_state->uapi.dst));
11623                 return -EINVAL;
11624         }
11625
11626         /*
11627          * There's something wrong with the cursor on CHV pipe C.
11628          * If it straddles the left edge of the screen then
11629          * moving it away from the edge or disabling it often
11630          * results in a pipe underrun, and often that can lead to
11631          * dead pipe (constant underrun reported, and it scans
11632          * out just a solid color). To recover from that, the
11633          * display power well must be turned off and on again.
11634          * Refuse the put the cursor into that compromised position.
11635          */
11636         if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
11637             plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
11638                 drm_dbg_kms(&dev_priv->drm,
11639                             "CHV cursor C not allowed to straddle the left screen edge\n");
11640                 return -EINVAL;
11641         }
11642
11643         plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
11644
11645         return 0;
11646 }
11647
/*
 * Program (or disable, when @plane_state is NULL/invisible) an
 * i9xx-style cursor. Register write order matters — see the long
 * comment below about how updates are armed.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
                               const struct intel_crtc_state *crtc_state,
                               const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum pipe pipe = plane->pipe;
        u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
        unsigned long irqflags;

        /* All-zero register values (the defaults above) disable the cursor. */
        if (plane_state && plane_state->uapi.visible) {
                unsigned width = drm_rect_width(&plane_state->uapi.dst);
                unsigned height = drm_rect_height(&plane_state->uapi.dst);

                cntl = plane_state->ctl |
                        i9xx_cursor_ctl_crtc(crtc_state);

                /* Non-square cursor => CUR_FBC_CTL carries the height. */
                if (width != height)
                        fbc_ctl = CUR_FBC_CTL_EN | (height - 1);

                base = intel_cursor_base(plane_state);
                pos = intel_cursor_position(plane_state);
        }

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /*
         * On some platforms writing CURCNTR first will also
         * cause CURPOS to be armed by the CURBASE write.
         * Without the CURCNTR write the CURPOS write would
         * arm itself. Thus we always update CURCNTR before
         * CURPOS.
         *
         * On other platforms CURPOS always requires the
         * CURBASE write to arm the update. Additionally
         * a write to any of the cursor register will cancel
         * an already armed cursor update. Thus leaving out
         * the CURBASE write after CURPOS could lead to a
         * cursor that doesn't appear to move, or even change
         * shape. Thus we always write CURBASE.
         *
         * The other registers are armed by the CURBASE write
         * except when the plane is getting enabled at which time
         * the CURCNTR write arms the update.
         */

        if (INTEL_GEN(dev_priv) >= 9)
                skl_write_cursor_wm(plane, crtc_state);

        /* Full reprogram only when something other than the position changed. */
        if (plane->cursor.base != base ||
            plane->cursor.size != fbc_ctl ||
            plane->cursor.cntl != cntl) {
                if (HAS_CUR_FBC(dev_priv))
                        intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
                                          fbc_ctl);
                intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
                intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
                intel_de_write_fw(dev_priv, CURBASE(pipe), base);

                /* Cache the written values so position-only updates are cheap. */
                plane->cursor.base = base;
                plane->cursor.size = fbc_ctl;
                plane->cursor.cntl = cntl;
        } else {
                intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
                intel_de_write_fw(dev_priv, CURBASE(pipe), base);
        }

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11716
/* Turn the i9xx cursor off by programming it with a NULL plane state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
                                const struct intel_crtc_state *crtc_state)
{
        i9xx_update_cursor(plane, crtc_state, NULL);
}
11722
11723 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
11724                                      enum pipe *pipe)
11725 {
11726         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11727         enum intel_display_power_domain power_domain;
11728         intel_wakeref_t wakeref;
11729         bool ret;
11730         u32 val;
11731
11732         /*
11733          * Not 100% correct for planes that can move between pipes,
11734          * but that's only the case for gen2-3 which don't have any
11735          * display power wells.
11736          */
11737         power_domain = POWER_DOMAIN_PIPE(plane->pipe);
11738         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11739         if (!wakeref)
11740                 return false;
11741
11742         val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
11743
11744         ret = val & MCURSOR_MODE;
11745
11746         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
11747                 *pipe = plane->pipe;
11748         else
11749                 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
11750                         MCURSOR_PIPE_SELECT_SHIFT;
11751
11752         intel_display_power_put(dev_priv, power_domain, wakeref);
11753
11754         return ret;
11755 }
11756
/*
 * VESA 640x480x72Hz mode to set on the pipe while doing load
 * detection (see intel_get_load_detect_pipe()).
 */
static const struct drm_display_mode load_detect_mode = {
        DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
                 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
11762
11763 struct drm_framebuffer *
11764 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11765                          struct drm_mode_fb_cmd2 *mode_cmd)
11766 {
11767         struct intel_framebuffer *intel_fb;
11768         int ret;
11769
11770         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11771         if (!intel_fb)
11772                 return ERR_PTR(-ENOMEM);
11773
11774         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11775         if (ret)
11776                 goto err;
11777
11778         return &intel_fb->base;
11779
11780 err:
11781         kfree(intel_fb);
11782         return ERR_PTR(ret);
11783 }
11784
11785 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
11786                                         struct drm_crtc *crtc)
11787 {
11788         struct drm_plane *plane;
11789         struct drm_plane_state *plane_state;
11790         int ret, i;
11791
11792         ret = drm_atomic_add_affected_planes(state, crtc);
11793         if (ret)
11794                 return ret;
11795
11796         for_each_new_plane_in_state(state, plane, plane_state, i) {
11797                 if (plane_state->crtc != crtc)
11798                         continue;
11799
11800                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
11801                 if (ret)
11802                         return ret;
11803
11804                 drm_atomic_set_fb_for_plane(plane_state, NULL);
11805         }
11806
11807         return 0;
11808 }
11809
/*
 * Take over a crtc and drive @connector with the fixed 640x480 load
 * detect mode so the connector can be probed. The state needed to
 * undo this is stashed in @old->restore_state for
 * intel_release_load_detect_pipe().
 *
 * NOTE(review): the return type is int, but the function returns
 * true/false on the normal paths and may return -EDEADLK (negative,
 * i.e. also "truthy" to a bare boolean test) when locking must be
 * retried — callers need to handle all three cases. Verify against
 * callers before changing this.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
                               struct intel_load_detect_pipe *old,
                               struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_crtc *intel_crtc;
        struct intel_encoder *intel_encoder =
                intel_attached_encoder(to_intel_connector(connector));
        struct drm_crtc *possible_crtc;
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = NULL;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_atomic_state *state = NULL, *restore_state = NULL;
        struct drm_connector_state *connector_state;
        struct intel_crtc_state *crtc_state;
        int ret, i = -1;

        drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                    connector->base.id, connector->name,
                    encoder->base.id, encoder->name);

        old->restore_state = NULL;

        drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

        /*
         * Algorithm gets a little messy:
         *
         *   - if the connector already has an assigned crtc, use it (but make
         *     sure it's on first)
         *
         *   - try to find the first unused crtc that can drive this connector,
         *     and use that if we find one
         */

        /* See if we already have a CRTC for this connector */
        if (connector->state->crtc) {
                crtc = connector->state->crtc;

                ret = drm_modeset_lock(&crtc->mutex, ctx);
                if (ret)
                        goto fail;

                /* Make sure the crtc and connector are running */
                goto found;
        }

        /* Find an unused one (if possible) */
        for_each_crtc(dev, possible_crtc) {
                i++;
                /* Skip crtcs this encoder cannot drive. */
                if (!(encoder->possible_crtcs & (1 << i)))
                        continue;

                ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
                if (ret)
                        goto fail;

                /* Skip crtcs that are already in use; drop their lock. */
                if (possible_crtc->state->enable) {
                        drm_modeset_unlock(&possible_crtc->mutex);
                        continue;
                }

                crtc = possible_crtc;
                break;
        }

        /*
         * If we didn't find an unused CRTC, don't use any.
         */
        if (!crtc) {
                drm_dbg_kms(&dev_priv->drm,
                            "no pipe available for load-detect\n");
                ret = -ENODEV;
                goto fail;
        }

found:
        intel_crtc = to_intel_crtc(crtc);

        /* One state to apply the load-detect mode, one to restore later. */
        state = drm_atomic_state_alloc(dev);
        restore_state = drm_atomic_state_alloc(dev);
        if (!state || !restore_state) {
                ret = -ENOMEM;
                goto fail;
        }

        state->acquire_ctx = ctx;
        restore_state->acquire_ctx = ctx;

        connector_state = drm_atomic_get_connector_state(state, connector);
        if (IS_ERR(connector_state)) {
                ret = PTR_ERR(connector_state);
                goto fail;
        }

        ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
        if (ret)
                goto fail;

        crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
        if (IS_ERR(crtc_state)) {
                ret = PTR_ERR(crtc_state);
                goto fail;
        }

        crtc_state->uapi.active = true;

        ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
                                           &load_detect_mode);
        if (ret)
                goto fail;

        /* Planes would scan out stale data; turn them all off. */
        ret = intel_modeset_disable_planes(state, crtc);
        if (ret)
                goto fail;

        /* Snapshot the current connector/crtc/plane state for restore. */
        ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
        if (!ret)
                ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
        if (!ret)
                ret = drm_atomic_add_affected_planes(restore_state, crtc);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "Failed to create a copy of old state to restore: %i\n",
                            ret);
                goto fail;
        }

        ret = drm_atomic_commit(state);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "failed to set mode on load-detect pipe\n");
                goto fail;
        }

        /* Success: hand the restore state to the caller. */
        old->restore_state = restore_state;
        drm_atomic_state_put(state);

        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
        return true;

fail:
        if (state) {
                drm_atomic_state_put(state);
                state = NULL;
        }
        if (restore_state) {
                drm_atomic_state_put(restore_state);
                restore_state = NULL;
        }

        /* -EDEADLK must be propagated so the caller can back off and retry. */
        if (ret == -EDEADLK)
                return ret;

        return false;
}
11968
/*
 * Undo intel_get_load_detect_pipe() by committing the saved
 * restore_state. A no-op when load detection never took over a pipe
 * (old->restore_state == NULL).
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
                                    struct intel_load_detect_pipe *old,
                                    struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_encoder *intel_encoder =
                intel_attached_encoder(to_intel_connector(connector));
        struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_atomic_state *state = old->restore_state;
        int ret;

        drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                    connector->base.id, connector->name,
                    encoder->base.id, encoder->name);

        if (!state)
                return;

        /* Commit failure is only logged; there is no way to recover here. */
        ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
        if (ret)
                drm_dbg_kms(&i915->drm,
                            "Couldn't release load detect pipe: %i\n", ret);
        drm_atomic_state_put(state);
}
11993
11994 static int i9xx_pll_refclk(struct drm_device *dev,
11995                            const struct intel_crtc_state *pipe_config)
11996 {
11997         struct drm_i915_private *dev_priv = to_i915(dev);
11998         u32 dpll = pipe_config->dpll_hw_state.dpll;
11999
12000         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
12001                 return dev_priv->vbt.lvds_ssc_freq;
12002         else if (HAS_PCH_SPLIT(dev_priv))
12003                 return 120000;
12004         else if (!IS_GEN(dev_priv, 2))
12005                 return 96000;
12006         else
12007                 return 48000;
12008 }
12009
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
        struct dpll clock;
        int port_clock;
        int refclk = i9xx_pll_refclk(dev, pipe_config);

        /* Pick FP0 or FP1 depending on which divisor set is selected. */
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
                fp = pipe_config->dpll_hw_state.fp0;
        else
                fp = pipe_config->dpll_hw_state.fp1;

        /* Decode the M/N divisors; Pineview uses a different field layout. */
        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
        if (IS_PINEVIEW(dev_priv)) {
                clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
                clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
        } else {
                clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }

        if (!IS_GEN(dev_priv, 2)) {
                /* P1 is stored as a one-hot bitfield; ffs() recovers the value. */
                if (IS_PINEVIEW(dev_priv))
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
                else
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
                               DPLL_FPA01_P1_POST_DIV_SHIFT);

                /* P2 depends on the DPLL operating mode (DAC vs LVDS). */
                switch (dpll & DPLL_MODE_MASK) {
                case DPLLB_MODE_DAC_SERIAL:
                        clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
                                5 : 10;
                        break;
                case DPLLB_MODE_LVDS:
                        clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
                                7 : 14;
                        break;
                default:
                        drm_dbg_kms(&dev_priv->drm,
                                    "Unknown DPLL mode %08x in programmed "
                                    "mode\n", (int)(dpll & DPLL_MODE_MASK));
                        return;
                }

                if (IS_PINEVIEW(dev_priv))
                        port_clock = pnv_calc_dpll_params(refclk, &clock);
                else
                        port_clock = i9xx_calc_dpll_params(refclk, &clock);
        } else {
                /* Gen2: the LVDS register tells us whether pipe B drives LVDS. */
                u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
                                                                 LVDS);
                bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

                if (is_lvds) {
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
                                       DPLL_FPA01_P1_POST_DIV_SHIFT);

                        if (lvds & LVDS_CLKB_POWER_UP)
                                clock.p2 = 7;
                        else
                                clock.p2 = 14;
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
                        else {
                                clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
                                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
                        }
                        if (dpll & PLL_P2_DIVIDE_BY_4)
                                clock.p2 = 4;
                        else
                                clock.p2 = 2;
                }

                port_clock = i9xx_calc_dpll_params(refclk, &clock);
        }

        /*
         * This value includes pixel_multiplier. We will use
         * port_clock to compute adjusted_mode.crtc_clock in the
         * encoder's get_config() function.
         */
        pipe_config->port_clock = port_clock;
}
12101
int intel_dotclock_calculate(int link_freq,
                             const struct intel_link_m_n *m_n)
{
        /*
         * The calculation for the data clock is:
         * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
         * But we want to avoid losing precision if possible, so:
         * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
         *
         * and the link clock is simpler:
         * link_clock = (m * link_clock) / n
         */

        /* Guard against division by zero when no link M/N is programmed. */
        if (!m_n->link_n)
                return 0;

        return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}
12120
/*
 * Read out the clock state of a PCH-connected (ILK-style) pipe.
 * Fills pipe_config->port_clock from the DPLL registers and derives
 * adjusted_mode.crtc_clock from the FDI M/N configuration.
 */
static void ilk_pch_clock_get(struct intel_crtc *crtc,
                              struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /* read out port_clock from the DPLL */
        i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * In case there is an active pipe without active ports,
         * we may need some idea for the dotclock anyway.
         * Calculate one based on the FDI configuration.
         */
        pipe_config->hw.adjusted_mode.crtc_clock =
                intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
                                         &pipe_config->fdi_m_n);
}
12138
12139 static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
12140                                    struct intel_crtc *crtc)
12141 {
12142         memset(crtc_state, 0, sizeof(*crtc_state));
12143
12144         __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
12145
12146         crtc_state->cpu_transcoder = INVALID_TRANSCODER;
12147         crtc_state->master_transcoder = INVALID_TRANSCODER;
12148         crtc_state->hsw_workaround_pipe = INVALID_PIPE;
12149         crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
12150         crtc_state->scaler_state.scaler_id = -1;
12151         crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
12152 }
12153
12154 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
12155 {
12156         struct intel_crtc_state *crtc_state;
12157
12158         crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
12159
12160         if (crtc_state)
12161                 intel_crtc_state_reset(crtc_state, crtc);
12162
12163         return crtc_state;
12164 }
12165
12166 /* Returns the currently programmed mode of the given encoder. */
12167 struct drm_display_mode *
12168 intel_encoder_current_mode(struct intel_encoder *encoder)
12169 {
12170         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
12171         struct intel_crtc_state *crtc_state;
12172         struct drm_display_mode *mode;
12173         struct intel_crtc *crtc;
12174         enum pipe pipe;
12175
12176         if (!encoder->get_hw_state(encoder, &pipe))
12177                 return NULL;
12178
12179         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12180
12181         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
12182         if (!mode)
12183                 return NULL;
12184
12185         crtc_state = intel_crtc_state_alloc(crtc);
12186         if (!crtc_state) {
12187                 kfree(mode);
12188                 return NULL;
12189         }
12190
12191         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
12192                 kfree(crtc_state);
12193                 kfree(mode);
12194                 return NULL;
12195         }
12196
12197         encoder->get_config(encoder, crtc_state);
12198
12199         intel_mode_from_pipe_config(mode, crtc_state);
12200
12201         kfree(crtc_state);
12202
12203         return mode;
12204 }
12205
/* drm_crtc_funcs.destroy: tear down the DRM core state, then free the crtc. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        /* must cleanup before freeing: drm core still references @crtc */
        drm_crtc_cleanup(crtc);
        kfree(intel_crtc);
}
12213
12214 /**
12215  * intel_wm_need_update - Check whether watermarks need updating
12216  * @cur: current plane state
12217  * @new: new plane state
12218  *
12219  * Check current plane state versus the new one to determine whether
12220  * watermarks need to be recalculated.
12221  *
12222  * Returns true or false.
12223  */
12224 static bool intel_wm_need_update(const struct intel_plane_state *cur,
12225                                  struct intel_plane_state *new)
12226 {
12227         /* Update watermarks on tiling or size changes. */
12228         if (new->uapi.visible != cur->uapi.visible)
12229                 return true;
12230
12231         if (!cur->hw.fb || !new->hw.fb)
12232                 return false;
12233
12234         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
12235             cur->hw.rotation != new->hw.rotation ||
12236             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
12237             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
12238             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
12239             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
12240                 return true;
12241
12242         return false;
12243 }
12244
12245 static bool needs_scaling(const struct intel_plane_state *state)
12246 {
12247         int src_w = drm_rect_width(&state->uapi.src) >> 16;
12248         int src_h = drm_rect_height(&state->uapi.src) >> 16;
12249         int dst_w = drm_rect_width(&state->uapi.dst);
12250         int dst_h = drm_rect_height(&state->uapi.dst);
12251
12252         return (src_w != dst_w || src_h != dst_h);
12253 }
12254
/*
 * Work out what a plane state change means for its crtc: whether the
 * plane is being turned on/off, whether watermarks must be recomputed
 * before/after the update, and which self-refresh features (cxsr,
 * LP watermarks) must be disabled around the plane update.
 *
 * Returns 0 on success or a negative error code from scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
                                    struct intel_crtc_state *crtc_state,
                                    const struct intel_plane_state *old_plane_state,
                                    struct intel_plane_state *plane_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        bool mode_changed = needs_modeset(crtc_state);
        bool was_crtc_enabled = old_crtc_state->hw.active;
        bool is_crtc_enabled = crtc_state->hw.active;
        bool turn_off, turn_on, visible, was_visible;
        int ret;

        /* gen9+ planes (other than the cursor) have dedicated scalers */
        if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
                ret = skl_update_scaler_plane(crtc_state, plane_state);
                if (ret)
                        return ret;
        }

        was_visible = old_plane_state->uapi.visible;
        visible = plane_state->uapi.visible;

        /* a plane cannot have been visible on a crtc that was off */
        if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
                was_visible = false;

        /*
         * Visibility is calculated as if the crtc was on, but
         * after scaler setup everything depends on it being off
         * when the crtc isn't active.
         *
         * FIXME this is wrong for watermarks. Watermarks should also
         * be computed as if the pipe would be active. Perhaps move
         * per-plane wm computation to the .check_plane() hook, and
         * only combine the results from all planes in the current place?
         */
        if (!is_crtc_enabled) {
                intel_plane_set_invisible(crtc_state, plane_state);
                visible = false;
        }

        /* invisible before and after: nothing further to decide */
        if (!was_visible && !visible)
                return 0;

        turn_off = was_visible && (!visible || mode_changed);
        turn_on = visible && (!was_visible || mode_changed);

        drm_dbg_atomic(&dev_priv->drm,
                       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
                       crtc->base.base.id, crtc->base.name,
                       plane->base.base.id, plane->base.name,
                       was_visible, visible,
                       turn_off, turn_on, mode_changed);

        if (turn_on) {
                /* pre-gen5 (except g4x) recomputes watermarks in software */
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        crtc_state->update_wm_pre = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        crtc_state->disable_cxsr = true;
        } else if (turn_off) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        crtc_state->update_wm_post = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        crtc_state->disable_cxsr = true;
        } else if (intel_wm_need_update(old_plane_state, plane_state)) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
                        /* FIXME bollocks */
                        crtc_state->update_wm_pre = true;
                        crtc_state->update_wm_post = true;
                }
        }

        /* mark the frontbuffer bit whenever the plane touches the screen */
        if (visible || was_visible)
                crtc_state->fb_bits |= plane->frontbuffer_bit;

        /*
         * ILK/SNB DVSACNTR/Sprite Enable
         * IVB SPR_CTL/Sprite Enable
         * "When in Self Refresh Big FIFO mode, a write to enable the
         *  plane will be internally buffered and delayed while Big FIFO
         *  mode is exiting."
         *
         * Which means that enabling the sprite can take an extra frame
         * when we start in big FIFO mode (LP1+). Thus we need to drop
         * down to LP0 and wait for vblank in order to make sure the
         * sprite gets enabled on the next vblank after the register write.
         * Doing otherwise would risk enabling the sprite one frame after
         * we've already signalled flip completion. We can resume LP1+
         * once the sprite has been enabled.
         *
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         * IVB SPR_SCALE/Scaling Enable
         * "Low Power watermarks must be disabled for at least one
         *  frame before enabling sprite scaling, and kept disabled
         *  until sprite scaling is disabled."
         *
         * ILK/SNB DVSASCALE/Scaling Enable
         * "When in Self Refresh Big FIFO mode, scaling enable will be
         *  masked off while Big FIFO mode is exiting."
         *
         * Despite the w/a only being listed for IVB we assume that
         * the ILK/SNB note has similar ramifications, hence we apply
         * the w/a on all three platforms.
         *
         * With experimental results seems this is needed also for primary
         * plane, not only sprite plane.
         */
        if (plane->id != PLANE_CURSOR &&
            (IS_GEN_RANGE(dev_priv, 5, 6) ||
             IS_IVYBRIDGE(dev_priv)) &&
            (turn_on || (!needs_scaling(old_plane_state) &&
                         needs_scaling(plane_state))))
                crtc_state->disable_lp_wm = true;

        return 0;
}
12376
12377 static bool encoders_cloneable(const struct intel_encoder *a,
12378                                const struct intel_encoder *b)
12379 {
12380         /* masks could be asymmetric, so check both ways */
12381         return a == b || (a->cloneable & (1 << b->type) &&
12382                           b->cloneable & (1 << a->type));
12383 }
12384
12385 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
12386                                          struct intel_crtc *crtc,
12387                                          struct intel_encoder *encoder)
12388 {
12389         struct intel_encoder *source_encoder;
12390         struct drm_connector *connector;
12391         struct drm_connector_state *connector_state;
12392         int i;
12393
12394         for_each_new_connector_in_state(state, connector, connector_state, i) {
12395                 if (connector_state->crtc != &crtc->base)
12396                         continue;
12397
12398                 source_encoder =
12399                         to_intel_encoder(connector_state->best_encoder);
12400                 if (!encoders_cloneable(encoder, source_encoder))
12401                         return false;
12402         }
12403
12404         return true;
12405 }
12406
12407 static int icl_add_linked_planes(struct intel_atomic_state *state)
12408 {
12409         struct intel_plane *plane, *linked;
12410         struct intel_plane_state *plane_state, *linked_plane_state;
12411         int i;
12412
12413         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12414                 linked = plane_state->planar_linked_plane;
12415
12416                 if (!linked)
12417                         continue;
12418
12419                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
12420                 if (IS_ERR(linked_plane_state))
12421                         return PTR_ERR(linked_plane_state);
12422
12423                 WARN_ON(linked_plane_state->planar_linked_plane != plane);
12424                 WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
12425         }
12426
12427         return 0;
12428 }
12429
/*
 * On gen11+ planar (NV12-style) YUV formats need a second "Y" plane
 * paired with the main UV plane. Tear down any stale plane links for
 * this crtc, then pick a free Y-capable plane for each plane that needs
 * one and copy the relevant parameters over to it.
 *
 * Returns 0 on success, -EINVAL if not enough free Y planes exist, or
 * a negative error code from acquiring plane state.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
        struct intel_plane *plane, *linked;
        struct intel_plane_state *plane_state;
        int i;

        if (INTEL_GEN(dev_priv) < 11)
                return 0;

        /*
         * Destroy all old plane links and make the slave plane invisible
         * in the crtc_state->active_planes mask.
         */
        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
                        continue;

                plane_state->planar_linked_plane = NULL;
                /* slaves are not uapi-visible; drop them from active_planes */
                if (plane_state->planar_slave && !plane_state->uapi.visible) {
                        crtc_state->active_planes &= ~BIT(plane->id);
                        crtc_state->update_planes |= BIT(plane->id);
                }

                plane_state->planar_slave = false;
        }

        /* nothing on this crtc needs a Y plane */
        if (!crtc_state->nv12_planes)
                return 0;

        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                struct intel_plane_state *linked_state = NULL;

                if (plane->pipe != crtc->pipe ||
                    !(crtc_state->nv12_planes & BIT(plane->id)))
                        continue;

                /* find the first Y-capable plane that is currently unused */
                for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
                        if (!icl_is_nv12_y_plane(linked->id))
                                continue;

                        if (crtc_state->active_planes & BIT(linked->id))
                                continue;

                        linked_state = intel_atomic_get_plane_state(state, linked);
                        if (IS_ERR(linked_state))
                                return PTR_ERR(linked_state);

                        break;
                }

                if (!linked_state) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Need %d free Y planes for planar YUV\n",
                                    hweight8(crtc_state->nv12_planes));

                        return -EINVAL;
                }

                plane_state->planar_linked_plane = linked;

                linked_state->planar_slave = true;
                linked_state->planar_linked_plane = plane;
                crtc_state->active_planes |= BIT(linked->id);
                crtc_state->update_planes |= BIT(linked->id);
                drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
                            linked->base.name, plane->base.name);

                /* Copy parameters to slave plane */
                linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
                linked_state->color_ctl = plane_state->color_ctl;
                linked_state->view = plane_state->view;
                memcpy(linked_state->color_plane, plane_state->color_plane,
                       sizeof(linked_state->color_plane));

                intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
                linked_state->uapi.src = plane_state->uapi.src;
                linked_state->uapi.dst = plane_state->uapi.dst;

                /* HDR planes select their chroma upsampler partner via cus_ctl */
                if (icl_is_hdr_plane(dev_priv, plane->id)) {
                        if (linked->id == PLANE_SPRITE5)
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
                        else if (linked->id == PLANE_SPRITE4)
                                plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
                        else
                                MISSING_CASE(linked->id);
                }
        }

        return 0;
}
12523
12524 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
12525 {
12526         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
12527         struct intel_atomic_state *state =
12528                 to_intel_atomic_state(new_crtc_state->uapi.state);
12529         const struct intel_crtc_state *old_crtc_state =
12530                 intel_atomic_get_old_crtc_state(state, crtc);
12531
12532         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
12533 }
12534
12535 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
12536 {
12537         const struct drm_display_mode *adjusted_mode =
12538                 &crtc_state->hw.adjusted_mode;
12539
12540         if (!crtc_state->hw.enable)
12541                 return 0;
12542
12543         return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
12544                                  adjusted_mode->crtc_clock);
12545 }
12546
12547 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
12548                                const struct intel_cdclk_state *cdclk_state)
12549 {
12550         const struct drm_display_mode *adjusted_mode =
12551                 &crtc_state->hw.adjusted_mode;
12552
12553         if (!crtc_state->hw.enable)
12554                 return 0;
12555
12556         return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
12557                                  cdclk_state->logical.cdclk);
12558 }
12559
12560 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
12561 {
12562         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12563         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12564         const struct drm_display_mode *adjusted_mode =
12565                 &crtc_state->hw.adjusted_mode;
12566         u16 linetime_wm;
12567
12568         if (!crtc_state->hw.enable)
12569                 return 0;
12570
12571         linetime_wm = DIV_ROUND_UP(adjusted_mode->crtc_htotal * 1000 * 8,
12572                                    crtc_state->pixel_rate);
12573
12574         /* Display WA #1135: BXT:ALL GLK:ALL */
12575         if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
12576                 linetime_wm /= 2;
12577
12578         return linetime_wm;
12579 }
12580
12581 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
12582                                    struct intel_crtc *crtc)
12583 {
12584         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12585         struct intel_crtc_state *crtc_state =
12586                 intel_atomic_get_new_crtc_state(state, crtc);
12587         const struct intel_cdclk_state *cdclk_state;
12588
12589         if (INTEL_GEN(dev_priv) >= 9)
12590                 crtc_state->linetime = skl_linetime_wm(crtc_state);
12591         else
12592                 crtc_state->linetime = hsw_linetime_wm(crtc_state);
12593
12594         if (!hsw_crtc_supports_ips(crtc))
12595                 return 0;
12596
12597         cdclk_state = intel_atomic_get_cdclk_state(state);
12598         if (IS_ERR(cdclk_state))
12599                 return PTR_ERR(cdclk_state);
12600
12601         crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
12602                                                        cdclk_state);
12603
12604         return 0;
12605 }
12606
/*
 * Per-crtc atomic check: validate and compute the derived state for
 * @crtc's new crtc_state (clocks, color management, watermarks,
 * scalers, IPS, line time). Returns 0 on success or a negative error
 * code, which fails the whole atomic commit.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        bool mode_changed = needs_modeset(crtc_state);
        int ret;

        /* pre-gen5 (except g4x) needs a post-update wm pass when disabling */
        if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
            mode_changed && !crtc_state->hw.active)
                crtc_state->update_wm_post = true;

        /* compute the PLL state; a dpll must not already be assigned here */
        if (mode_changed && crtc_state->hw.enable &&
            dev_priv->display.crtc_compute_clock &&
            !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
                ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
                if (ret)
                        return ret;
        }

        /*
         * May need to update pipe gamma enable bits
         * when C8 planes are getting enabled/disabled.
         */
        if (c8_planes_changed(crtc_state))
                crtc_state->uapi.color_mgmt_changed = true;

        if (mode_changed || crtc_state->update_pipe ||
            crtc_state->uapi.color_mgmt_changed) {
                ret = intel_color_check(crtc_state);
                if (ret)
                        return ret;
        }

        if (dev_priv->display.compute_pipe_wm) {
                ret = dev_priv->display.compute_pipe_wm(crtc_state);
                if (ret) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Target pipe watermarks are invalid\n");
                        return ret;
                }
        }

        if (dev_priv->display.compute_intermediate_wm) {
                /* intermediate wm requires the target wm to exist first */
                if (drm_WARN_ON(&dev_priv->drm,
                                !dev_priv->display.compute_pipe_wm))
                        return 0;

                /*
                 * Calculate 'intermediate' watermarks that satisfy both the
                 * old state and the new state.  We can program these
                 * immediately.
                 */
                ret = dev_priv->display.compute_intermediate_wm(crtc_state);
                if (ret) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "No valid intermediate pipe watermarks are possible\n");
                        return ret;
                }
        }

        if (INTEL_GEN(dev_priv) >= 9) {
                if (mode_changed || crtc_state->update_pipe) {
                        ret = skl_update_scaler_crtc(crtc_state);
                        if (ret)
                                return ret;
                }

                /* assign hardware scalers to the planes/crtc that need them */
                ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
                if (ret)
                        return ret;
        }

        if (HAS_IPS(dev_priv)) {
                ret = hsw_compute_ips_config(crtc_state);
                if (ret)
                        return ret;
        }

        if (INTEL_GEN(dev_priv) >= 9 ||
            IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
                ret = hsw_compute_linetime_wm(state, crtc);
                if (ret)
                        return ret;

        }

        return 0;
}
12697
/*
 * Sync each connector's atomic state (best_encoder, crtc) with the
 * legacy encoder/crtc pointers, adjusting the connector reference
 * counts to match. Used after hardware readout / reset so the atomic
 * state reflects what the hardware is actually driving.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
        struct intel_connector *connector;
        struct drm_connector_list_iter conn_iter;

        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                /* drop the reference held for the previous state->crtc */
                if (connector->base.state->crtc)
                        drm_connector_put(&connector->base);

                if (connector->base.encoder) {
                        connector->base.state->best_encoder =
                                connector->base.encoder;
                        connector->base.state->crtc =
                                connector->base.encoder->crtc;

                        /* a bound connector state holds a reference */
                        drm_connector_get(&connector->base);
                } else {
                        connector->base.state->best_encoder = NULL;
                        connector->base.state->crtc = NULL;
                }
        }
        drm_connector_list_iter_end(&conn_iter);
}
12722
12723 static int
12724 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
12725                       struct intel_crtc_state *pipe_config)
12726 {
12727         struct drm_connector *connector = conn_state->connector;
12728         struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
12729         const struct drm_display_info *info = &connector->display_info;
12730         int bpp;
12731
12732         switch (conn_state->max_bpc) {
12733         case 6 ... 7:
12734                 bpp = 6 * 3;
12735                 break;
12736         case 8 ... 9:
12737                 bpp = 8 * 3;
12738                 break;
12739         case 10 ... 11:
12740                 bpp = 10 * 3;
12741                 break;
12742         case 12:
12743                 bpp = 12 * 3;
12744                 break;
12745         default:
12746                 return -EINVAL;
12747         }
12748
12749         if (bpp < pipe_config->pipe_bpp) {
12750                 drm_dbg_kms(&i915->drm,
12751                             "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
12752                             "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
12753                             connector->base.id, connector->name,
12754                             bpp, 3 * info->bpc,
12755                             3 * conn_state->max_requested_bpc,
12756                             pipe_config->pipe_bpp);
12757
12758                 pipe_config->pipe_bpp = bpp;
12759         }
12760
12761         return 0;
12762 }
12763
12764 static int
12765 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12766                           struct intel_crtc_state *pipe_config)
12767 {
12768         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12769         struct drm_atomic_state *state = pipe_config->uapi.state;
12770         struct drm_connector *connector;
12771         struct drm_connector_state *connector_state;
12772         int bpp, i;
12773
12774         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
12775             IS_CHERRYVIEW(dev_priv)))
12776                 bpp = 10*3;
12777         else if (INTEL_GEN(dev_priv) >= 5)
12778                 bpp = 12*3;
12779         else
12780                 bpp = 8*3;
12781
12782         pipe_config->pipe_bpp = bpp;
12783
12784         /* Clamp display bpp to connector max bpp */
12785         for_each_new_connector_in_state(state, connector, connector_state, i) {
12786                 int ret;
12787
12788                 if (connector_state->crtc != &crtc->base)
12789                         continue;
12790
12791                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
12792                 if (ret)
12793                         return ret;
12794         }
12795
12796         return 0;
12797 }
12798
/* Dump the crtc_* timing fields of @mode to the kms debug log. */
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
                                    const struct drm_display_mode *mode)
{
        drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
                    "type: 0x%x flags: 0x%x\n",
                    mode->crtc_clock,
                    mode->crtc_hdisplay, mode->crtc_hsync_start,
                    mode->crtc_hsync_end, mode->crtc_htotal,
                    mode->crtc_vdisplay, mode->crtc_vsync_start,
                    mode->crtc_vsync_end, mode->crtc_vtotal,
                    mode->type, mode->flags);
}
12811
/*
 * Dump one link M/N configuration to the kms debug log.
 * @id names which M/N set this is (e.g. "dp m_n", "fdi m_n").
 */
static inline void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
                      const char *id, unsigned int lane_count,
                      const struct intel_link_m_n *m_n)
{
        struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

        drm_dbg_kms(&i915->drm,
                    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
                    id, lane_count,
                    m_n->gmch_m, m_n->gmch_n,
                    m_n->link_m, m_n->link_n, m_n->tu);
}
12825
/* Log an HDMI infoframe, but only when KMS debugging is enabled. */
static void
intel_dump_infoframe(struct drm_i915_private *dev_priv,
                     const union hdmi_infoframe *frame)
{
        /* hdmi_infoframe_log() is unconditional, so gate on drm.debug here */
        if (!drm_debug_enabled(DRM_UT_KMS))
                return;

        hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
}
12835
/* Map INTEL_OUTPUT_* enum values to their names for debug output. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

static const char * const output_type_str[] = {
        OUTPUT_TYPE(UNUSED),
        OUTPUT_TYPE(ANALOG),
        OUTPUT_TYPE(DVO),
        OUTPUT_TYPE(SDVO),
        OUTPUT_TYPE(LVDS),
        OUTPUT_TYPE(TVOUT),
        OUTPUT_TYPE(HDMI),
        OUTPUT_TYPE(DP),
        OUTPUT_TYPE(EDP),
        OUTPUT_TYPE(DSI),
        OUTPUT_TYPE(DDI),
        OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
12854
/*
 * Format the @output_types bitmask into @buf as a comma-separated list
 * of type names. Stops early (silently truncating) if @buf fills up;
 * in that case the WARN below may also fire since the remaining bits
 * were never cleared.
 */
static void snprintf_output_types(char *buf, size_t len,
                                  unsigned int output_types)
{
        char *str = buf;
        int i;

        str[0] = '\0';

        for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
                int r;

                if ((output_types & BIT(i)) == 0)
                        continue;

                /* comma-separate every entry after the first */
                r = snprintf(str, len, "%s%s",
                             str != buf ? "," : "", output_type_str[i]);
                /* snprintf returns the would-be length: >= len means truncated */
                if (r >= len)
                        break;
                str += r;
                len -= r;

                output_types &= ~BIT(i);
        }

        /* bits set beyond the known type names (or left over after truncation) */
        WARN_ON_ONCE(output_types != 0);
}
12881
/* Human-readable names for enum intel_output_format values. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
12888
12889 static const char *output_formats(enum intel_output_format format)
12890 {
12891         if (format >= ARRAY_SIZE(output_format_str))
12892                 format = INTEL_OUTPUT_FORMAT_INVALID;
12893         return output_format_str[format];
12894 }
12895
/*
 * Dump one plane's state to the KMS debug log: framebuffer id/size/format,
 * visibility, rotation, assigned scaler and (when visible) the src/dst
 * rectangles.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	/* No fb attached: nothing beyond visibility worth printing. */
	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    yesno(plane_state->uapi.visible));
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height,
		    drm_get_format_name(fb->format->format, &format_name),
		    yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	/* src is in 16.16 fixed point, dst in integer pixels. */
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
}
12925
/*
 * Dump a crtc state to the KMS debug log: enable/active status, output
 * types and format, transcoders, FDI/DP m/n values, infoframes, requested
 * and adjusted modes, pfit/scaler/dpll/color state, and - when @state is
 * provided - the state of every plane on this pipe. @context is a
 * caller-supplied string printed in the header line.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	/* A disabled crtc has no interesting config; only dump its planes. */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	drm_dbg_kms(&dev_priv->drm,
		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
		    transcoder_name(pipe_config->master_transcoder),
		    pipe_config->sync_mode_slaves_mask);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 is the second set of link values used with DRRS. */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	/* Dump each infoframe only when its enable bit is set. */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);

	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	if (INTEL_GEN(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    crtc->num_scalers,
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	/* GMCH platforms have their own pfit registers; the rest use the PCH style. */
	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
			    pipe_config->pch_pfit.pos,
			    pipe_config->pch_pfit.size,
			    enableddisabled(pipe_config->pch_pfit.enabled),
			    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* CHV has a CGM unit instead of the CSC mode register. */
	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->csc_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

dump_planes:
	/* Plane states only exist when a full atomic state was passed in. */
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
13057
/*
 * Verify that no digital port is driven by more than one SST/HDMI
 * encoder, and that MST and SST/HDMI are not mixed on the same port.
 * Returns true when the configuration is conflict free.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state from @state, fall back to the current one. */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		/* An encoder without a crtc shouldn't happen here. */
		drm_WARN_ON(dev, !connector_state->crtc);

		switch (encoder->type) {
		case INTEL_OUTPUT_DDI:
			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
				break;
			/* else, fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			/* the same port mustn't appear more than once */
			if (used_ports & BIT(encoder->port))
				ret = false;

			used_ports |= BIT(encoder->port);
			break;
		case INTEL_OUTPUT_DP_MST:
			/* Multiple MST streams legitimately share one port. */
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
13126
/*
 * Copy the parts of the uapi state that may change without a full
 * modeset (currently only the color management blobs) into the hw state.
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
{
	intel_crtc_copy_color_blobs(crtc_state);
}
13132
/*
 * Copy the full uapi crtc state (enable/active, modes, and via the
 * nomodeset helper the color blobs) into the hw state.
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
}
13142
/*
 * Copy the hw crtc state back into the uapi state. The mode is set via
 * drm_atomic_set_mode_for_crtc() (which manages the uapi mode blob);
 * failure there is not expected, hence the WARN_ON.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
13159
/*
 * Reset @crtc_state to a freshly-allocated state while preserving the
 * fields that must survive a modeset: the uapi state, scaler state, dpll
 * selection and hw state, icl port dplls, crc enable, and (on g4x/vlv/chv)
 * the watermark state. Returns 0 or -ENOMEM.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	/* Overwrite the live state wholesale; saved_state is only a staging copy. */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(crtc_state);

	return 0;
}
13197
/*
 * Compute the full pipe configuration: sanitize sync polarity flags,
 * establish the baseline pipe bpp and pipe source size, record the
 * output types, then let each encoder's .compute_config() and the crtc
 * adjust the config - retrying at most once when the crtc asks for it
 * (RETRY). Returns 0 on success, -EDEADLK to restart the locking
 * sequence, or another negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret, i;
	bool retry = true;

	/* Default the cpu transcoder 1:1 to the pipe. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the starting bpp; encoders may lower pipe_bpp below. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK just means "retry the locking", not a failure. */
			if (ret != -EDEADLK)
				drm_dbg_kms(&i915->drm,
					    "Encoder config failure: %d\n",
					    ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
		return ret;
	}

	/* The crtc may ask for one (and only one) full recomputation. */
	if (ret == RETRY) {
		if (drm_WARN(&i915->drm, !retry,
			     "loop in pipe configuration computation\n"))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	/*
	 * Make drm_calc_timestamping_constants in
	 * drm_atomic_helper_update_legacy_modeset_state() happy
	 */
	pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;

	return 0;
}
13342
13343 static int
13344 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
13345 {
13346         struct intel_atomic_state *state =
13347                 to_intel_atomic_state(crtc_state->uapi.state);
13348         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
13349         struct drm_connector_state *conn_state;
13350         struct drm_connector *connector;
13351         int i;
13352
13353         for_each_new_connector_in_state(&state->base, connector,
13354                                         conn_state, i) {
13355                 struct intel_encoder *encoder =
13356                         to_intel_encoder(conn_state->best_encoder);
13357                 int ret;
13358
13359                 if (conn_state->crtc != &crtc->base ||
13360                     !encoder->compute_config_late)
13361                         continue;
13362
13363                 ret = encoder->compute_config_late(encoder, crtc_state,
13364                                                    conn_state);
13365                 if (ret)
13366                         return ret;
13367         }
13368
13369         return 0;
13370 }
13371
/*
 * Compare two clocks (in kHz), allowing them to differ by roughly 5%.
 * Two equal clocks always match; a zero clock only matches another zero.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	sum = clock1 + clock2;
	delta = abs(clock1 - clock2);

	/*
	 * (delta + sum) * 100 / sum < 105 accepts deltas up to ~5% of the
	 * combined clocks (modulo integer truncation).
	 */
	return ((delta + sum) * 100) / sum < 105;
}
13389
/*
 * Compare two m/n divider pairs. In exact mode (or when any value is
 * zero) only identical pairs match. Otherwise the pair with the smaller
 * n is scaled up by powers of two; if the n values can be made equal,
 * the m values are compared fuzzily.
 */
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
		  unsigned int m2, unsigned int n2,
		  bool exact)
{
	if (m == m2 && n == n2)
		return true;

	if (exact || !m || !n || !m2 || !n2)
		return false;

	/* The shifts below must not overflow; m/n values are presumably
	 * bounded by DATA_LINK_M_N_MASK — this guards that assumption. */
	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);

	if (n > n2) {
		while (n > n2) {
			m2 <<= 1;
			n2 <<= 1;
		}
	} else if (n < n2) {
		while (n < n2) {
			m <<= 1;
			n <<= 1;
		}
	}

	/* n values weren't a power-of-two ratio apart: no match possible. */
	if (n != n2)
		return false;

	return intel_fuzzy_clock_check(m, m2);
}
13420
13421 static bool
13422 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
13423                        const struct intel_link_m_n *m2_n2,
13424                        bool exact)
13425 {
13426         return m_n->tu == m2_n2->tu &&
13427                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
13428                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
13429                 intel_compare_m_n(m_n->link_m, m_n->link_n,
13430                                   m2_n2->link_m, m2_n2->link_n, exact);
13431 }
13432
13433 static bool
13434 intel_compare_infoframe(const union hdmi_infoframe *a,
13435                         const union hdmi_infoframe *b)
13436 {
13437         return memcmp(a, b, sizeof(*a)) == 0;
13438 }
13439
/*
 * Report a mismatch between an expected (@a) and found (@b) infoframe.
 * A fastset mismatch is only logged at KMS debug level (and skipped
 * entirely when KMS debugging is off); otherwise it is a driver error.
 */
static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
			       bool fastset, const char *name,
			       const union hdmi_infoframe *a,
			       const union hdmi_infoframe *b)
{
	if (fastset) {
		if (!drm_debug_enabled(DRM_UT_KMS))
			return;

		drm_dbg_kms(&dev_priv->drm,
			    "fastset mismatch in %s infoframe\n", name);
		drm_dbg_kms(&dev_priv->drm, "expected:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
		drm_dbg_kms(&dev_priv->drm, "found:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
	} else {
		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
		drm_err(&dev_priv->drm, "expected:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
		drm_err(&dev_priv->drm, "found:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
	}
}
13464
/*
 * Log a single pipe config field mismatch for @crtc. The printf-style
 * @format/varargs describe the expected/found values (printed via the
 * kernel's %pV recursive format). Fastset mismatches go to the debug
 * log; modeset mismatches are errors.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
			    crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
			crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}
13487
13488 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
13489 {
13490         if (i915_modparams.fastboot != -1)
13491                 return i915_modparams.fastboot;
13492
13493         /* Enable fastboot by default on Skylake and newer */
13494         if (INTEL_GEN(dev_priv) >= 9)
13495                 return true;
13496
13497         /* Enable fastboot by default on VLV and CHV */
13498         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13499                 return true;
13500
13501         /* Disabled by default on all others */
13502         return false;
13503 }
13504
13505 static bool
13506 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
13507                           const struct intel_crtc_state *pipe_config,
13508                           bool fastset)
13509 {
13510         struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
13511         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
13512         bool ret = true;
13513         u32 bp_gamma = 0;
13514         bool fixup_inherited = fastset &&
13515                 (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
13516                 !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
13517
13518         if (fixup_inherited && !fastboot_enabled(dev_priv)) {
13519                 drm_dbg_kms(&dev_priv->drm,
13520                             "initial modeset and fastboot not set\n");
13521                 ret = false;
13522         }
13523
13524 #define PIPE_CONF_CHECK_X(name) do { \
13525         if (current_config->name != pipe_config->name) { \
13526                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13527                                      "(expected 0x%08x, found 0x%08x)", \
13528                                      current_config->name, \
13529                                      pipe_config->name); \
13530                 ret = false; \
13531         } \
13532 } while (0)
13533
13534 #define PIPE_CONF_CHECK_I(name) do { \
13535         if (current_config->name != pipe_config->name) { \
13536                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13537                                      "(expected %i, found %i)", \
13538                                      current_config->name, \
13539                                      pipe_config->name); \
13540                 ret = false; \
13541         } \
13542 } while (0)
13543
13544 #define PIPE_CONF_CHECK_BOOL(name) do { \
13545         if (current_config->name != pipe_config->name) { \
13546                 pipe_config_mismatch(fastset, crtc,  __stringify(name), \
13547                                      "(expected %s, found %s)", \
13548                                      yesno(current_config->name), \
13549                                      yesno(pipe_config->name)); \
13550                 ret = false; \
13551         } \
13552 } while (0)
13553
13554 /*
13555  * Checks state where we only read out the enabling, but not the entire
13556  * state itself (like full infoframes or ELD for audio). These states
13557  * require a full modeset on bootup to fix up.
13558  */
13559 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
13560         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
13561                 PIPE_CONF_CHECK_BOOL(name); \
13562         } else { \
13563                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13564                                      "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
13565                                      yesno(current_config->name), \
13566                                      yesno(pipe_config->name)); \
13567                 ret = false; \
13568         } \
13569 } while (0)
13570
13571 #define PIPE_CONF_CHECK_P(name) do { \
13572         if (current_config->name != pipe_config->name) { \
13573                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13574                                      "(expected %p, found %p)", \
13575                                      current_config->name, \
13576                                      pipe_config->name); \
13577                 ret = false; \
13578         } \
13579 } while (0)
13580
13581 #define PIPE_CONF_CHECK_M_N(name) do { \
13582         if (!intel_compare_link_m_n(&current_config->name, \
13583                                     &pipe_config->name,\
13584                                     !fastset)) { \
13585                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13586                                      "(expected tu %i gmch %i/%i link %i/%i, " \
13587                                      "found tu %i, gmch %i/%i link %i/%i)", \
13588                                      current_config->name.tu, \
13589                                      current_config->name.gmch_m, \
13590                                      current_config->name.gmch_n, \
13591                                      current_config->name.link_m, \
13592                                      current_config->name.link_n, \
13593                                      pipe_config->name.tu, \
13594                                      pipe_config->name.gmch_m, \
13595                                      pipe_config->name.gmch_n, \
13596                                      pipe_config->name.link_m, \
13597                                      pipe_config->name.link_n); \
13598                 ret = false; \
13599         } \
13600 } while (0)
13601
13602 /* This is required for BDW+ where there is only one set of registers for
13603  * switching between high and low RR.
13604  * This macro can be used whenever a comparison has to be made between one
13605  * hw state and multiple sw state variables.
13606  */
13607 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
13608         if (!intel_compare_link_m_n(&current_config->name, \
13609                                     &pipe_config->name, !fastset) && \
13610             !intel_compare_link_m_n(&current_config->alt_name, \
13611                                     &pipe_config->name, !fastset)) { \
13612                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13613                                      "(expected tu %i gmch %i/%i link %i/%i, " \
13614                                      "or tu %i gmch %i/%i link %i/%i, " \
13615                                      "found tu %i, gmch %i/%i link %i/%i)", \
13616                                      current_config->name.tu, \
13617                                      current_config->name.gmch_m, \
13618                                      current_config->name.gmch_n, \
13619                                      current_config->name.link_m, \
13620                                      current_config->name.link_n, \
13621                                      current_config->alt_name.tu, \
13622                                      current_config->alt_name.gmch_m, \
13623                                      current_config->alt_name.gmch_n, \
13624                                      current_config->alt_name.link_m, \
13625                                      current_config->alt_name.link_n, \
13626                                      pipe_config->name.tu, \
13627                                      pipe_config->name.gmch_m, \
13628                                      pipe_config->name.gmch_n, \
13629                                      pipe_config->name.link_m, \
13630                                      pipe_config->name.link_n); \
13631                 ret = false; \
13632         } \
13633 } while (0)
13634
13635 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
13636         if ((current_config->name ^ pipe_config->name) & (mask)) { \
13637                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13638                                      "(%x) (expected %i, found %i)", \
13639                                      (mask), \
13640                                      current_config->name & (mask), \
13641                                      pipe_config->name & (mask)); \
13642                 ret = false; \
13643         } \
13644 } while (0)
13645
13646 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
13647         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
13648                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13649                                      "(expected %i, found %i)", \
13650                                      current_config->name, \
13651                                      pipe_config->name); \
13652                 ret = false; \
13653         } \
13654 } while (0)
13655
13656 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
13657         if (!intel_compare_infoframe(&current_config->infoframes.name, \
13658                                      &pipe_config->infoframes.name)) { \
13659                 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
13660                                                &current_config->infoframes.name, \
13661                                                &pipe_config->infoframes.name); \
13662                 ret = false; \
13663         } \
13664 } while (0)
13665
13666 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
13667         if (current_config->name1 != pipe_config->name1) { \
13668                 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
13669                                 "(expected %i, found %i, won't compare lut values)", \
13670                                 current_config->name1, \
13671                                 pipe_config->name1); \
13672                 ret = false;\
13673         } else { \
13674                 if (!intel_color_lut_equal(current_config->name2, \
13675                                         pipe_config->name2, pipe_config->name1, \
13676                                         bit_precision)) { \
13677                         pipe_config_mismatch(fastset, crtc, __stringify(name2), \
13678                                         "hw_state doesn't match sw_state"); \
13679                         ret = false; \
13680                 } \
13681         } \
13682 } while (0)
13683
13684 #define PIPE_CONF_QUIRK(quirk) \
13685         ((current_config->quirks | pipe_config->quirks) & (quirk))
13686
13687         PIPE_CONF_CHECK_I(cpu_transcoder);
13688
13689         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
13690         PIPE_CONF_CHECK_I(fdi_lanes);
13691         PIPE_CONF_CHECK_M_N(fdi_m_n);
13692
13693         PIPE_CONF_CHECK_I(lane_count);
13694         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
13695
13696         if (INTEL_GEN(dev_priv) < 8) {
13697                 PIPE_CONF_CHECK_M_N(dp_m_n);
13698
13699                 if (current_config->has_drrs)
13700                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
13701         } else
13702                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
13703
13704         PIPE_CONF_CHECK_X(output_types);
13705
13706         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
13707         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
13708         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
13709         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
13710         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
13711         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
13712
13713         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
13714         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
13715         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
13716         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
13717         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
13718         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
13719
13720         PIPE_CONF_CHECK_I(pixel_multiplier);
13721         PIPE_CONF_CHECK_I(output_format);
13722         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
13723         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
13724             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13725                 PIPE_CONF_CHECK_BOOL(limited_color_range);
13726
13727         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
13728         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
13729         PIPE_CONF_CHECK_BOOL(has_infoframe);
13730         PIPE_CONF_CHECK_BOOL(fec_enable);
13731
13732         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
13733
13734         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13735                               DRM_MODE_FLAG_INTERLACE);
13736
13737         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
13738                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13739                                       DRM_MODE_FLAG_PHSYNC);
13740                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13741                                       DRM_MODE_FLAG_NHSYNC);
13742                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13743                                       DRM_MODE_FLAG_PVSYNC);
13744                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13745                                       DRM_MODE_FLAG_NVSYNC);
13746         }
13747
13748         PIPE_CONF_CHECK_X(gmch_pfit.control);
13749         /* pfit ratios are autocomputed by the hw on gen4+ */
13750         if (INTEL_GEN(dev_priv) < 4)
13751                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
13752         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
13753
13754         /*
13755          * Changing the EDP transcoder input mux
13756          * (A_ONOFF vs. A_ON) requires a full modeset.
13757          */
13758         PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
13759
13760         if (!fastset) {
13761                 PIPE_CONF_CHECK_I(pipe_src_w);
13762                 PIPE_CONF_CHECK_I(pipe_src_h);
13763
13764                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
13765                 if (current_config->pch_pfit.enabled) {
13766                         PIPE_CONF_CHECK_X(pch_pfit.pos);
13767                         PIPE_CONF_CHECK_X(pch_pfit.size);
13768                 }
13769
13770                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
13771                 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
13772
13773                 PIPE_CONF_CHECK_X(gamma_mode);
13774                 if (IS_CHERRYVIEW(dev_priv))
13775                         PIPE_CONF_CHECK_X(cgm_mode);
13776                 else
13777                         PIPE_CONF_CHECK_X(csc_mode);
13778                 PIPE_CONF_CHECK_BOOL(gamma_enable);
13779                 PIPE_CONF_CHECK_BOOL(csc_enable);
13780
13781                 PIPE_CONF_CHECK_I(linetime);
13782                 PIPE_CONF_CHECK_I(ips_linetime);
13783
13784                 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
13785                 if (bp_gamma)
13786                         PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
13787         }
13788
13789         PIPE_CONF_CHECK_BOOL(double_wide);
13790
13791         PIPE_CONF_CHECK_P(shared_dpll);
13792         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
13793         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
13794         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
13795         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
13796         PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
13797         PIPE_CONF_CHECK_X(dpll_hw_state.spll);
13798         PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
13799         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
13800         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
13801         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
13802         PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
13803         PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
13804         PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
13805         PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
13806         PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
13807         PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
13808         PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
13809         PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
13810         PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
13811         PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
13812         PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
13813         PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
13814         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
13815         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
13816         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
13817         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
13818         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
13819         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
13820         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
13821         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
13822         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
13823
13824         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
13825         PIPE_CONF_CHECK_X(dsi_pll.div);
13826
13827         if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
13828                 PIPE_CONF_CHECK_I(pipe_bpp);
13829
13830         PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
13831         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
13832
13833         PIPE_CONF_CHECK_I(min_voltage_level);
13834
13835         PIPE_CONF_CHECK_X(infoframes.enable);
13836         PIPE_CONF_CHECK_X(infoframes.gcp);
13837         PIPE_CONF_CHECK_INFOFRAME(avi);
13838         PIPE_CONF_CHECK_INFOFRAME(spd);
13839         PIPE_CONF_CHECK_INFOFRAME(hdmi);
13840         PIPE_CONF_CHECK_INFOFRAME(drm);
13841
13842         PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
13843         PIPE_CONF_CHECK_I(master_transcoder);
13844
13845         PIPE_CONF_CHECK_I(dsc.compression_enable);
13846         PIPE_CONF_CHECK_I(dsc.dsc_split);
13847         PIPE_CONF_CHECK_I(dsc.compressed_bpp);
13848
13849         PIPE_CONF_CHECK_I(mst_master_transcoder);
13850
13851 #undef PIPE_CONF_CHECK_X
13852 #undef PIPE_CONF_CHECK_I
13853 #undef PIPE_CONF_CHECK_BOOL
13854 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
13855 #undef PIPE_CONF_CHECK_P
13856 #undef PIPE_CONF_CHECK_FLAGS
13857 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
13858 #undef PIPE_CONF_CHECK_COLOR_LUT
13859 #undef PIPE_CONF_QUIRK
13860
13861         return ret;
13862 }
13863
13864 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
13865                                            const struct intel_crtc_state *pipe_config)
13866 {
13867         if (pipe_config->has_pch_encoder) {
13868                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
13869                                                             &pipe_config->fdi_m_n);
13870                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
13871
13872                 /*
13873                  * FDI already provided one idea for the dotclock.
13874                  * Yell if the encoder disagrees.
13875                  */
13876                 drm_WARN(&dev_priv->drm,
13877                          !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
13878                          "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
13879                          fdi_dotclock, dotclock);
13880         }
13881 }
13882
/*
 * Compare the skl+ watermark and DDB allocation state read back from
 * the hardware against the software state in @new_crtc_state.  Any
 * mismatch is only reported via drm_err(); nothing is fixed up here.
 */
static void verify_wm_state(struct intel_crtc *crtc,
                            struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        /* Scratch container for the hw readout; allocated because it is
         * too large to put on the kernel stack. */
        struct skl_hw_state {
                struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
                struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
                struct skl_pipe_wm wm;
        } *hw;
        struct skl_pipe_wm *sw_wm;
        struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
        u8 hw_enabled_slices;
        const enum pipe pipe = crtc->pipe;
        int plane, level, max_level = ilk_wm_max_level(dev_priv);

        /* Watermark verification only applies to active gen9+ pipes. */
        if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
                return;

        hw = kzalloc(sizeof(*hw), GFP_KERNEL);
        if (!hw)
                return;

        /* Read back the per-pipe watermarks and DDB allocations from hw. */
        skl_pipe_wm_get_hw_state(crtc, &hw->wm);
        sw_wm = &new_crtc_state->wm.skl.optimal;

        skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

        hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

        /* gen11+: also cross-check the globally enabled DBUF slice mask. */
        if (INTEL_GEN(dev_priv) >= 11 &&
            hw_enabled_slices != dev_priv->enabled_dbuf_slices_mask)
                drm_err(&dev_priv->drm,
                        "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
                        dev_priv->enabled_dbuf_slices_mask,
                        hw_enabled_slices);

        /* planes */
        for_each_universal_plane(dev_priv, pipe, plane) {
                struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

                hw_plane_wm = &hw->wm.planes[plane];
                sw_plane_wm = &sw_wm->planes[plane];

                /* Watermarks: every level must match between hw and sw. */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]))
                                continue;

                        drm_err(&dev_priv->drm,
                                "mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                pipe_name(pipe), plane + 1, level,
                                sw_plane_wm->wm[level].plane_en,
                                sw_plane_wm->wm[level].plane_res_b,
                                sw_plane_wm->wm[level].plane_res_l,
                                hw_plane_wm->wm[level].plane_en,
                                hw_plane_wm->wm[level].plane_res_b,
                                hw_plane_wm->wm[level].plane_res_l);
                }

                /* Transition watermark is checked separately from the levels. */
                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        drm_err(&dev_priv->drm,
                                "mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                pipe_name(pipe), plane + 1,
                                sw_plane_wm->trans_wm.plane_en,
                                sw_plane_wm->trans_wm.plane_res_b,
                                sw_plane_wm->trans_wm.plane_res_l,
                                hw_plane_wm->trans_wm.plane_en,
                                hw_plane_wm->trans_wm.plane_res_b,
                                hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw->ddb_y[plane];
                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        drm_err(&dev_priv->drm,
                                "mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
                                pipe_name(pipe), plane + 1,
                                sw_ddb_entry->start, sw_ddb_entry->end,
                                hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        /*
         * cursor
         * If the cursor plane isn't active, we may not have updated its ddb
         * allocation. In that case since the ddb allocation will be updated
         * once the plane becomes visible, we can skip this check.
         *
         * NOTE(review): the guard below is always true, so the cursor is in
         * fact checked unconditionally — confirm whether the skip described
         * above is still intended.
         */
        if (1) {
                struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

                hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
                sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]))
                                continue;

                        drm_err(&dev_priv->drm,
                                "mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                pipe_name(pipe), level,
                                sw_plane_wm->wm[level].plane_en,
                                sw_plane_wm->wm[level].plane_res_b,
                                sw_plane_wm->wm[level].plane_res_l,
                                hw_plane_wm->wm[level].plane_en,
                                hw_plane_wm->wm[level].plane_res_b,
                                hw_plane_wm->wm[level].plane_res_l);
                }

                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        drm_err(&dev_priv->drm,
                                "mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                pipe_name(pipe),
                                sw_plane_wm->trans_wm.plane_en,
                                sw_plane_wm->trans_wm.plane_res_b,
                                sw_plane_wm->trans_wm.plane_res_l,
                                hw_plane_wm->trans_wm.plane_en,
                                hw_plane_wm->trans_wm.plane_res_b,
                                hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        drm_err(&dev_priv->drm,
                                "mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
                                pipe_name(pipe),
                                sw_ddb_entry->start, sw_ddb_entry->end,
                                hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        kfree(hw);
}
14026
14027 static void
14028 verify_connector_state(struct intel_atomic_state *state,
14029                        struct intel_crtc *crtc)
14030 {
14031         struct drm_connector *connector;
14032         struct drm_connector_state *new_conn_state;
14033         int i;
14034
14035         for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
14036                 struct drm_encoder *encoder = connector->encoder;
14037                 struct intel_crtc_state *crtc_state = NULL;
14038
14039                 if (new_conn_state->crtc != &crtc->base)
14040                         continue;
14041
14042                 if (crtc)
14043                         crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
14044
14045                 intel_connector_verify_state(crtc_state, new_conn_state);
14046
14047                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
14048                      "connector's atomic encoder doesn't match legacy encoder\n");
14049         }
14050 }
14051
/*
 * Verify every encoder's software state (its crtc backpointer) against
 * the connector states in @state, and check that an encoder with no
 * crtc attached is really disabled in hardware.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
        struct intel_encoder *encoder;
        struct drm_connector *connector;
        struct drm_connector_state *old_conn_state, *new_conn_state;
        int i;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                /*
                 * found: some connector in @state referenced this encoder in
                 * its old or new state, i.e. the encoder is touched by this
                 * commit.  enabled: a connector's *new* state uses it.
                 */
                bool enabled = false, found = false;
                enum pipe pipe;

                drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
                            encoder->base.base.id,
                            encoder->base.name);

                for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
                                                   new_conn_state, i) {
                        if (old_conn_state->best_encoder == &encoder->base)
                                found = true;

                        if (new_conn_state->best_encoder != &encoder->base)
                                continue;
                        found = enabled = true;

                        I915_STATE_WARN(new_conn_state->crtc !=
                                        encoder->base.crtc,
                             "connector's crtc doesn't match encoder crtc\n");
                }

                /* Encoders untouched by this commit are not checked. */
                if (!found)
                        continue;

                I915_STATE_WARN(!!encoder->base.crtc != enabled,
                     "encoder's enabled state mismatch "
                     "(expected %i, found %i)\n",
                     !!encoder->base.crtc, enabled);

                /*
                 * A detached encoder must also be off in hardware; read the
                 * actual state back to confirm.
                 */
                if (!encoder->base.crtc) {
                        bool active;

                        active = encoder->get_hw_state(encoder, &pipe);
                        I915_STATE_WARN(active,
                             "encoder detached but still enabled on pipe %c.\n",
                             pipe_name(pipe));
                }
        }
}
14100
/*
 * Cross-check the committed software crtc state against what the
 * hardware actually reports.  @old_crtc_state is recycled as scratch
 * storage for the hardware readout (it is destroyed and re-initialized
 * below), so callers must not rely on its contents afterwards.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
                  struct intel_crtc_state *old_crtc_state,
                  struct intel_crtc_state *new_crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_crtc_state *pipe_config = old_crtc_state;
        struct drm_atomic_state *state = old_crtc_state->uapi.state;
        bool active;

        /*
         * Turn the old state into a pristine container for the hw readout,
         * preserving the uapi.state backpointer across the reset so the
         * atomic core can still clean up properly.
         */
        __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
        intel_crtc_free_hw_state(old_crtc_state);
        intel_crtc_state_reset(old_crtc_state, crtc);
        old_crtc_state->uapi.state = state;

        drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
                    crtc->base.name);

        /* Read the pipe configuration back from hardware. */
        active = dev_priv->display.get_pipe_config(crtc, pipe_config);

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                active = new_crtc_state->hw.active;

        I915_STATE_WARN(new_crtc_state->hw.active != active,
                        "crtc active state doesn't match with hw state "
                        "(expected %i, found %i)\n",
                        new_crtc_state->hw.active, active);

        I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
                        "transitional active state does not match atomic hw state "
                        "(expected %i, found %i)\n",
                        new_crtc_state->hw.active, crtc->active);

        /*
         * Each encoder on the crtc must agree on the active state and the
         * pipe it is driving; active encoders also contribute their part
         * of the hw readout via ->get_config().
         */
        for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
                enum pipe pipe;

                active = encoder->get_hw_state(encoder, &pipe);
                I915_STATE_WARN(active != new_crtc_state->hw.active,
                                "[ENCODER:%i] active %i with crtc active %i\n",
                                encoder->base.base.id, active,
                                new_crtc_state->hw.active);

                I915_STATE_WARN(active && crtc->pipe != pipe,
                                "Encoder connected to wrong pipe %c\n",
                                pipe_name(pipe));

                if (active)
                        encoder->get_config(encoder, pipe_config);
        }

        intel_crtc_compute_pixel_rate(pipe_config);

        /* Nothing more to compare for an inactive crtc. */
        if (!new_crtc_state->hw.active)
                return;

        intel_pipe_config_sanity_check(dev_priv, pipe_config);

        /* Full (non-fastset) comparison of sw state vs. the hw readout. */
        if (!intel_pipe_config_compare(new_crtc_state,
                                       pipe_config, false)) {
                I915_STATE_WARN(1, "pipe state doesn't match!\n");
                intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
                intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
        }
}
14168
14169 static void
14170 intel_verify_planes(struct intel_atomic_state *state)
14171 {
14172         struct intel_plane *plane;
14173         const struct intel_plane_state *plane_state;
14174         int i;
14175
14176         for_each_new_intel_plane_in_state(state, plane,
14177                                           plane_state, i)
14178                 assert_plane(plane, plane_state->planar_slave ||
14179                              plane_state->uapi.visible);
14180 }
14181
/*
 * Verify the software tracking of one shared DPLL against its hardware
 * state.  With @crtc == NULL only the global reference bookkeeping is
 * checked; otherwise the pll's active/enabled crtc masks are checked
 * for this crtc, and the cached hw state is compared against a fresh
 * readout.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
                         struct intel_shared_dpll *pll,
                         struct intel_crtc *crtc,
                         struct intel_crtc_state *new_crtc_state)
{
        struct intel_dpll_hw_state dpll_hw_state;
        unsigned int crtc_mask;
        bool active;

        /* Zero first so memcmp() against the cached state is meaningful. */
        memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

        drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

        active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

        /* Always-on plls can't have their on/off tracking verified. */
        if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
                I915_STATE_WARN(!pll->on && pll->active_mask,
                     "pll in active use but not on in sw tracking\n");
                I915_STATE_WARN(pll->on && !pll->active_mask,
                     "pll is on but not used by any active crtc\n");
                I915_STATE_WARN(pll->on != active,
                     "pll on state mismatch (expected %i, found %i)\n",
                     pll->on, active);
        }

        /* No crtc: only check that active users don't exceed references. */
        if (!crtc) {
                I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
                                "more active pll users than references: %x vs %x\n",
                                pll->active_mask, pll->state.crtc_mask);

                return;
        }

        crtc_mask = drm_crtc_mask(&crtc->base);

        /* An active crtc must be in the pll's active mask, and vice versa. */
        if (new_crtc_state->hw.active)
                I915_STATE_WARN(!(pll->active_mask & crtc_mask),
                                "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
                                pipe_name(crtc->pipe), pll->active_mask);
        else
                I915_STATE_WARN(pll->active_mask & crtc_mask,
                                "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
                                pipe_name(crtc->pipe), pll->active_mask);

        I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
                        "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
                        crtc_mask, pll->state.crtc_mask);

        /* For an enabled pll, the cached hw state must match the readout. */
        I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
                                          &dpll_hw_state,
                                          sizeof(dpll_hw_state)),
                        "pll hw state mismatch\n");
}
14236
14237 static void
14238 verify_shared_dpll_state(struct intel_crtc *crtc,
14239                          struct intel_crtc_state *old_crtc_state,
14240                          struct intel_crtc_state *new_crtc_state)
14241 {
14242         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14243
14244         if (new_crtc_state->shared_dpll)
14245                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
14246
14247         if (old_crtc_state->shared_dpll &&
14248             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
14249                 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
14250                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
14251
14252                 I915_STATE_WARN(pll->active_mask & crtc_mask,
14253                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
14254                                 pipe_name(crtc->pipe));
14255                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
14256                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
14257                                 pipe_name(crtc->pipe));
14258         }
14259 }
14260
14261 static void
14262 intel_modeset_verify_crtc(struct intel_crtc *crtc,
14263                           struct intel_atomic_state *state,
14264                           struct intel_crtc_state *old_crtc_state,
14265                           struct intel_crtc_state *new_crtc_state)
14266 {
14267         if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
14268                 return;
14269
14270         verify_wm_state(crtc, new_crtc_state);
14271         verify_connector_state(state, crtc);
14272         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
14273         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
14274 }
14275
14276 static void
14277 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
14278 {
14279         int i;
14280
14281         for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
14282                 verify_single_dpll_state(dev_priv,
14283                                          &dev_priv->dpll.shared_dplls[i],
14284                                          NULL, NULL);
14285 }
14286
/*
 * State verification for resources not tied to a specific CRTC:
 * encoders, connectors without a CRTC, and all shared DPLLs
 * (checked without a CRTC reference).
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
14295
/*
 * Recompute the vblank timestamping constants and the platform-dependent
 * scanline counter offset for @crtc_state's adjusted mode. Must be rerun
 * whenever the active timings change, before vblank timestamps/scanline
 * queries are relied upon.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	drm_calc_timestamping_constants(&crtc->base, adjusted_mode);

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN(dev_priv, 2)) {
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
14348
14349 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
14350 {
14351         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14352         struct intel_crtc_state *new_crtc_state;
14353         struct intel_crtc *crtc;
14354         int i;
14355
14356         if (!dev_priv->display.crtc_compute_clock)
14357                 return;
14358
14359         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14360                 if (!needs_modeset(new_crtc_state))
14361                         continue;
14362
14363                 intel_release_shared_dplls(state, crtc);
14364         }
14365 }
14366
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 *
 * Returns 0 on success or a negative errno if pulling an extra CRTC state
 * into @state fails.
 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		/* Pulls the CRTC into the state (and locks it) if not there yet. */
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Reset any stale workaround pipe from a previous commit. */
		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/* Point the first new pipe at an already-enabled one, or the second
	 * new pipe at the first, so the commit code can apply the wait. */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
14427
14428 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
14429                            u8 active_pipes)
14430 {
14431         const struct intel_crtc_state *crtc_state;
14432         struct intel_crtc *crtc;
14433         int i;
14434
14435         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14436                 if (crtc_state->hw.active)
14437                         active_pipes |= BIT(crtc->pipe);
14438                 else
14439                         active_pipes &= ~BIT(crtc->pipe);
14440         }
14441
14442         return active_pipes;
14443 }
14444
/*
 * Extra checks that run only when at least one CRTC needs a full modeset:
 * recompute the active pipe mask, grab the global state lock when the set
 * of active pipes changes, recompute cdclk, release shared DPLLs of the
 * modesetted CRTCs, and apply the HSW multi-pipe planes workaround.
 * Returns 0 or a negative errno.
 */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	int ret;

	state->modeset = true;
	state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);

	/* Bits set here are pipes being turned on or off in this commit. */
	state->active_pipe_changes = state->active_pipes ^ dev_priv->active_pipes;

	if (state->active_pipe_changes) {
		ret = _intel_atomic_lock_global_state(state);
		if (ret)
			return ret;
	}

	ret = intel_modeset_calc_cdclk(state);
	if (ret)
		return ret;

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return hsw_mode_set_planes_workaround(state);

	return 0;
}
14472
14473 /*
14474  * Handle calculation of various watermark data at the end of the atomic check
14475  * phase.  The code here should be run after the per-crtc and per-plane 'check'
14476  * handlers to ensure that all derived state has been updated.
14477  */
14478 static int calc_watermark_data(struct intel_atomic_state *state)
14479 {
14480         struct drm_device *dev = state->base.dev;
14481         struct drm_i915_private *dev_priv = to_i915(dev);
14482
14483         /* Is there platform-specific watermark information to calculate? */
14484         if (dev_priv->display.compute_global_watermarks)
14485                 return dev_priv->display.compute_global_watermarks(state);
14486
14487         return 0;
14488 }
14489
14490 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
14491                                      struct intel_crtc_state *new_crtc_state)
14492 {
14493         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
14494                 return;
14495
14496         new_crtc_state->uapi.mode_changed = false;
14497         new_crtc_state->update_pipe = true;
14498 }
14499
/*
 * For a fastset, carry selected parts of the old state over into the new
 * one instead of using the freshly computed values.
 */
static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	/*
	 * If we're not doing the full modeset we want to
	 * keep the current M/N values as they may be
	 * sufficiently different to the computed values
	 * to cause problems.
	 *
	 * FIXME: should really copy more fuzzy state here
	 */
	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
14516
14517 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
14518                                           struct intel_crtc *crtc,
14519                                           u8 plane_ids_mask)
14520 {
14521         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14522         struct intel_plane *plane;
14523
14524         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
14525                 struct intel_plane_state *plane_state;
14526
14527                 if ((plane_ids_mask & BIT(plane->id)) == 0)
14528                         continue;
14529
14530                 plane_state = intel_atomic_get_plane_state(state, plane);
14531                 if (IS_ERR(plane_state))
14532                         return PTR_ERR(plane_state);
14533         }
14534
14535         return 0;
14536 }
14537
14538 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
14539 {
14540         /* See {hsw,vlv,ivb}_plane_ratio() */
14541         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
14542                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
14543                 IS_IVYBRIDGE(dev_priv);
14544 }
14545
/*
 * Run the plane-level atomic checks: per-plane driver checks, ICL NV12
 * linked/slave plane handling, and the per-plane minimum cdclk
 * calculation. Sets *need_cdclk_calc when a plane's min cdclk demands a
 * cdclk recomputation. Returns 0 or a negative errno.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state,
				     bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane doesn't count towards the plane ratios. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/* Only a changed plane count affects the ratio-based cdclk. */
		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
		if (ret)
			return ret;
	}

	return 0;
}
14610
14611 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
14612 {
14613         struct intel_crtc_state *crtc_state;
14614         struct intel_crtc *crtc;
14615         int i;
14616
14617         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14618                 int ret = intel_crtc_atomic_check(state, crtc);
14619                 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
14620                 if (ret) {
14621                         drm_dbg_atomic(&i915->drm,
14622                                        "[CRTC:%d:%s] atomic driver check failed\n",
14623                                        crtc->base.base.id, crtc->base.name);
14624                         return ret;
14625                 }
14626         }
14627
14628         return 0;
14629 }
14630
14631 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
14632                                                u8 transcoders)
14633 {
14634         const struct intel_crtc_state *new_crtc_state;
14635         struct intel_crtc *crtc;
14636         int i;
14637
14638         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14639                 if (new_crtc_state->hw.enable &&
14640                     transcoders & BIT(new_crtc_state->cpu_transcoder) &&
14641                     needs_modeset(new_crtc_state))
14642                         return true;
14643         }
14644
14645         return false;
14646 }
14647
14648 /**
14649  * intel_atomic_check - validate state object
14650  * @dev: drm device
14651  * @_state: state to validate
14652  */
14653 static int intel_atomic_check(struct drm_device *dev,
14654                               struct drm_atomic_state *_state)
14655 {
14656         struct drm_i915_private *dev_priv = to_i915(dev);
14657         struct intel_atomic_state *state = to_intel_atomic_state(_state);
14658         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14659         struct intel_cdclk_state *new_cdclk_state;
14660         struct intel_crtc *crtc;
14661         int ret, i;
14662         bool any_ms = false;
14663
14664         /* Catch I915_MODE_FLAG_INHERITED */
14665         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14666                                             new_crtc_state, i) {
14667                 if (new_crtc_state->uapi.mode.private_flags !=
14668                     old_crtc_state->uapi.mode.private_flags)
14669                         new_crtc_state->uapi.mode_changed = true;
14670         }
14671
14672         ret = drm_atomic_helper_check_modeset(dev, &state->base);
14673         if (ret)
14674                 goto fail;
14675
14676         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14677                                             new_crtc_state, i) {
14678                 if (!needs_modeset(new_crtc_state)) {
14679                         /* Light copy */
14680                         intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
14681
14682                         continue;
14683                 }
14684
14685                 ret = intel_crtc_prepare_cleared_state(new_crtc_state);
14686                 if (ret)
14687                         goto fail;
14688
14689                 if (!new_crtc_state->hw.enable)
14690                         continue;
14691
14692                 ret = intel_modeset_pipe_config(new_crtc_state);
14693                 if (ret)
14694                         goto fail;
14695         }
14696
14697         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14698                                             new_crtc_state, i) {
14699                 if (!needs_modeset(new_crtc_state))
14700                         continue;
14701
14702                 ret = intel_modeset_pipe_config_late(new_crtc_state);
14703                 if (ret)
14704                         goto fail;
14705
14706                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
14707         }
14708
14709         /**
14710          * Check if fastset is allowed by external dependencies like other
14711          * pipes and transcoders.
14712          *
14713          * Right now it only forces a fullmodeset when the MST master
14714          * transcoder did not changed but the pipe of the master transcoder
14715          * needs a fullmodeset so all slaves also needs to do a fullmodeset or
14716          * in case of port synced crtcs, if one of the synced crtcs
14717          * needs a full modeset, all other synced crtcs should be
14718          * forced a full modeset.
14719          */
14720         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14721                 if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
14722                         continue;
14723
14724                 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
14725                         enum transcoder master = new_crtc_state->mst_master_transcoder;
14726
14727                         if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
14728                                 new_crtc_state->uapi.mode_changed = true;
14729                                 new_crtc_state->update_pipe = false;
14730                         }
14731                 }
14732
14733                 if (is_trans_port_sync_mode(new_crtc_state)) {
14734                         u8 trans = new_crtc_state->sync_mode_slaves_mask;
14735
14736                         if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
14737                                 trans |= BIT(new_crtc_state->master_transcoder);
14738
14739                         if (intel_cpu_transcoders_need_modeset(state, trans)) {
14740                                 new_crtc_state->uapi.mode_changed = true;
14741                                 new_crtc_state->update_pipe = false;
14742                         }
14743                 }
14744         }
14745
14746         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14747                                             new_crtc_state, i) {
14748                 if (needs_modeset(new_crtc_state)) {
14749                         any_ms = true;
14750                         continue;
14751                 }
14752
14753                 if (!new_crtc_state->update_pipe)
14754                         continue;
14755
14756                 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
14757         }
14758
14759         if (any_ms && !check_digital_port_conflicts(state)) {
14760                 drm_dbg_kms(&dev_priv->drm,
14761                             "rejecting conflicting digital port configuration\n");
14762                 ret = EINVAL;
14763                 goto fail;
14764         }
14765
14766         ret = drm_dp_mst_atomic_check(&state->base);
14767         if (ret)
14768                 goto fail;
14769
14770         ret = intel_atomic_check_planes(state, &any_ms);
14771         if (ret)
14772                 goto fail;
14773
14774         new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
14775         if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
14776                 any_ms = true;
14777
14778         /*
14779          * distrust_bios_wm will force a full dbuf recomputation
14780          * but the hardware state will only get updated accordingly
14781          * if state->modeset==true. Hence distrust_bios_wm==true &&
14782          * state->modeset==false is an invalid combination which
14783          * would cause the hardware and software dbuf state to get
14784          * out of sync. We must prevent that.
14785          *
14786          * FIXME clean up this mess and introduce better
14787          * state tracking for dbuf.
14788          */
14789         if (dev_priv->wm.distrust_bios_wm)
14790                 any_ms = true;
14791
14792         if (any_ms) {
14793                 ret = intel_modeset_checks(state);
14794                 if (ret)
14795                         goto fail;
14796         }
14797
14798         ret = intel_atomic_check_crtcs(state);
14799         if (ret)
14800                 goto fail;
14801
14802         intel_fbc_choose_crtc(dev_priv, state);
14803         ret = calc_watermark_data(state);
14804         if (ret)
14805                 goto fail;
14806
14807         ret = intel_bw_atomic_check(state);
14808         if (ret)
14809                 goto fail;
14810
14811         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14812                                             new_crtc_state, i) {
14813                 if (!needs_modeset(new_crtc_state) &&
14814                     !new_crtc_state->update_pipe)
14815                         continue;
14816
14817                 intel_dump_pipe_config(new_crtc_state, state,
14818                                        needs_modeset(new_crtc_state) ?
14819                                        "[modeset]" : "[fastset]");
14820         }
14821
14822         return 0;
14823
14824  fail:
14825         if (ret == -EDEADLK)
14826                 return ret;
14827
14828         /*
14829          * FIXME would probably be nice to know which crtc specifically
14830          * caused the failure, in cases where we can pinpoint it.
14831          */
14832         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14833                                             new_crtc_state, i)
14834                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
14835
14836         return ret;
14837 }
14838
/*
 * Prepare all planes in @state for commit via the DRM atomic helper
 * (which invokes the planes' prepare_fb hooks). Returns 0 or a negative
 * errno.
 */
static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
	return drm_atomic_helper_prepare_planes(state->base.dev,
						&state->base);
}
14844
14845 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
14846 {
14847         struct drm_device *dev = crtc->base.dev;
14848         struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
14849
14850         if (!vblank->max_vblank_count)
14851                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
14852
14853         return crtc->base.funcs->get_vblank_counter(&crtc->base);
14854 }
14855
14856 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14857                                   struct intel_crtc_state *crtc_state)
14858 {
14859         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14860
14861         if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
14862                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14863
14864         if (crtc_state->has_pch_encoder) {
14865                 enum pipe pch_transcoder =
14866                         intel_crtc_pch_transcoder(crtc);
14867
14868                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
14869         }
14870 }
14871
/*
 * Apply the pipe updates that are done on the fastset/fastboot path
 * (no full modeset): pipe source size, panel fitter/scalers, linetime
 * watermarks and pipe chicken bits.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
14916
/*
 * Commit the pipe-level configuration for @crtc during the plane update
 * phase: color management, scaler detach, PIPEMISC and the fastset pipe
 * updates, followed by the watermark update. For full modesets most of
 * this was already programmed when the CRTC was enabled.
 */
static void commit_pipe_config(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_detach_scalers(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	/* Watermarks are updated for both modesets and fastsets. */
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state, crtc);
}
14949
/*
 * Enable @crtc during the commit phase. Only CRTCs undergoing a full
 * modeset are enabled here; the active timings must be updated before
 * the platform enable hook runs, and pipe CRC is re-enabled afterwards
 * once vblanks work again.
 */
static void intel_enable_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!needs_modeset(new_crtc_state))
		return;

	intel_crtc_update_active_timings(new_crtc_state);

	dev_priv->display.crtc_enable(state, crtc);

	/* vblanks work again, re-enable pipe CRC. */
	intel_crtc_enable_pipe_crc(crtc);
}
14967
/*
 * Update an active CRTC: LUT preload, encoder fastset updates, FBC,
 * and the pipe/plane programming performed under vblank evasion.
 * Called for both fastsets and freshly (re)enabled CRTCs.
 */
static void intel_update_crtc(struct intel_atomic_state *state,
                              struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        bool modeset = needs_modeset(new_crtc_state);

        if (!modeset) {
                /* Preload the LUTs before entering the critical section below. */
                if (new_crtc_state->preload_luts &&
                    (new_crtc_state->uapi.color_mgmt_changed ||
                     new_crtc_state->update_pipe))
                        intel_color_load_luts(new_crtc_state);

                intel_pre_plane_update(state, crtc);

                if (new_crtc_state->update_pipe)
                        intel_encoders_update_pipe(state, crtc);
        }

        /* Disable FBC when a fastset turns it off, (re)enable it otherwise. */
        if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
                intel_fbc_disable(crtc);
        else
                intel_fbc_enable(state, crtc);

        /* Perform vblank evasion around commit operation */
        intel_pipe_update_start(new_crtc_state);

        commit_pipe_config(state, crtc);

        if (INTEL_GEN(dev_priv) >= 9)
                skl_update_planes_on_crtc(state, crtc);
        else
                i9xx_update_planes_on_crtc(state, crtc);

        intel_pipe_update_end(new_crtc_state);

        /*
         * We usually enable FIFO underrun interrupts as part of the
         * CRTC enable sequence during modesets.  But when we inherit a
         * valid pipe configuration from the BIOS we need to take care
         * of enabling them on the CRTC's first fastset.
         */
        if (new_crtc_state->update_pipe && !modeset &&
            old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
                intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
15017
15018
/*
 * Fully disable one CRTC as part of a modeset: planes, pipe CRC,
 * the pipe itself, FBC, and its shared DPLL. The order of these
 * steps matters (see the CRC comment below).
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
                                          struct intel_crtc_state *old_crtc_state,
                                          struct intel_crtc_state *new_crtc_state,
                                          struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);

        intel_crtc_disable_planes(state, crtc);

        /*
         * We need to disable pipe CRC before disabling the pipe,
         * or we race against vblank off.
         */
        intel_crtc_disable_pipe_crc(crtc);

        dev_priv->display.crtc_disable(state, crtc);
        crtc->active = false;
        intel_fbc_disable(crtc);
        intel_disable_shared_dpll(old_crtc_state);

        /* FIXME unify this for all platforms */
        if (!new_crtc_state->hw.active &&
            !HAS_GMCH(dev_priv) &&
            dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state, crtc);
}
15045
15046 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
15047 {
15048         struct intel_crtc_state *new_crtc_state, *old_crtc_state;
15049         struct intel_crtc *crtc;
15050         u32 handled = 0;
15051         int i;
15052
15053         /* Only disable port sync and MST slaves */
15054         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15055                                             new_crtc_state, i) {
15056                 if (!needs_modeset(new_crtc_state))
15057                         continue;
15058
15059                 if (!old_crtc_state->hw.active)
15060                         continue;
15061
15062                 /* In case of Transcoder port Sync master slave CRTCs can be
15063                  * assigned in any order and we need to make sure that
15064                  * slave CRTCs are disabled first and then master CRTC since
15065                  * Slave vblanks are masked till Master Vblanks.
15066                  */
15067                 if (!is_trans_port_sync_slave(old_crtc_state) &&
15068                     !intel_dp_mst_is_slave_trans(old_crtc_state))
15069                         continue;
15070
15071                 intel_pre_plane_update(state, crtc);
15072                 intel_old_crtc_state_disables(state, old_crtc_state,
15073                                               new_crtc_state, crtc);
15074                 handled |= BIT(crtc->pipe);
15075         }
15076
15077         /* Disable everything else left on */
15078         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15079                                             new_crtc_state, i) {
15080                 if (!needs_modeset(new_crtc_state) ||
15081                     (handled & BIT(crtc->pipe)))
15082                         continue;
15083
15084                 intel_pre_plane_update(state, crtc);
15085                 if (old_crtc_state->hw.active)
15086                         intel_old_crtc_state_disables(state, old_crtc_state,
15087                                                       new_crtc_state, crtc);
15088         }
15089 }
15090
15091 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
15092 {
15093         struct intel_crtc_state *new_crtc_state;
15094         struct intel_crtc *crtc;
15095         int i;
15096
15097         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
15098                 if (!new_crtc_state->hw.active)
15099                         continue;
15100
15101                 intel_enable_crtc(state, crtc);
15102                 intel_update_crtc(state, crtc);
15103         }
15104 }
15105
15106 static void intel_set_dp_tp_ctl_normal(struct intel_atomic_state *state,
15107                                        struct intel_crtc *crtc)
15108 {
15109         struct drm_connector *uninitialized_var(conn);
15110         struct drm_connector_state *conn_state;
15111         struct intel_dp *intel_dp;
15112         int i;
15113
15114         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
15115                 if (conn_state->crtc == &crtc->base)
15116                         break;
15117         }
15118         intel_dp = intel_attached_dp(to_intel_connector(conn));
15119         intel_dp_stop_link_train(intel_dp);
15120 }
15121
/*
 * Enable a transcoder port sync master CRTC together with all of its
 * slaves. Slaves are enabled before the master (their vblanks are
 * masked until the master's vblank), and link training is completed
 * on the slaves before the master.
 */
static void intel_update_trans_port_sync_crtcs(struct intel_atomic_state *state,
                                               struct intel_crtc *crtc)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        const struct intel_crtc_state *new_slave_crtc_state;
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct intel_crtc *slave_crtc;
        int i;

        /* Enable all slaves of this master transcoder first. */
        for_each_new_intel_crtc_in_state(state, slave_crtc,
                                         new_slave_crtc_state, i) {
                if (new_slave_crtc_state->master_transcoder !=
                    new_crtc_state->cpu_transcoder)
                        continue;

                drm_dbg_kms(&i915->drm,
                            "Updating transcoder port sync slave [CRTC:%d:%s]\n",
                            slave_crtc->base.base.id, slave_crtc->base.name);

                intel_enable_crtc(state, slave_crtc);
        }

        drm_dbg_kms(&i915->drm,
                    "Updating transcoder port sync master [CRTC:%d:%s]\n",
                    crtc->base.base.id, crtc->base.name);

        intel_enable_crtc(state, crtc);

        /* Complete link training on the slaves, then the master. */
        for_each_new_intel_crtc_in_state(state, slave_crtc,
                                         new_slave_crtc_state, i) {
                if (new_slave_crtc_state->master_transcoder !=
                    new_crtc_state->cpu_transcoder)
                        continue;

                intel_set_dp_tp_ctl_normal(state, slave_crtc);
        }

        /* NOTE(review): delay between slave and master DP_TP_CTL writes —
         * presumably a bspec requirement, confirm against the spec. */
        usleep_range(200, 400);
        intel_set_dp_tp_ctl_normal(state, crtc);
}
15163
15164 static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
15165 {
15166         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15167         u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
15168         u8 required_slices = state->enabled_dbuf_slices_mask;
15169         u8 slices_union = hw_enabled_slices | required_slices;
15170
15171         /* If 2nd DBuf slice required, enable it here */
15172         if (INTEL_GEN(dev_priv) >= 11 && slices_union != hw_enabled_slices)
15173                 icl_dbuf_slices_update(dev_priv, slices_union);
15174 }
15175
15176 static void icl_dbuf_slice_post_update(struct intel_atomic_state *state)
15177 {
15178         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15179         u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
15180         u8 required_slices = state->enabled_dbuf_slices_mask;
15181
15182         /* If 2nd DBuf slice is no more required disable it */
15183         if (INTEL_GEN(dev_priv) >= 11 && required_slices != hw_enabled_slices)
15184                 icl_dbuf_slices_update(dev_priv, required_slices);
15185 }
15186
/*
 * gen9+ commit_modeset_enables implementation. Orders the CRTC
 * updates so that the DDB (display data buffer) allocations of any
 * two pipes never overlap at any point in the sequence, which would
 * otherwise cause underruns. entries[] tracks the DDB allocation
 * currently in effect for each pipe.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;
        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
        struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
        u8 update_pipes = 0, modeset_pipes = 0;
        int i;

        /* Classify pipes: fastset-only (update) vs. full modeset. */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if (!new_crtc_state->hw.active)
                        continue;

                /* ignore allocations for crtc's that have been turned off. */
                if (!needs_modeset(new_crtc_state)) {
                        entries[pipe] = old_crtc_state->wm.skl.ddb;
                        update_pipes |= BIT(pipe);
                } else {
                        modeset_pipes |= BIT(pipe);
                }
        }

        /*
         * Whenever the number of active pipes changes, we need to make sure we
         * update the pipes in the right order so that their ddb allocations
         * never overlap with each other between CRTC updates. Otherwise we'll
         * cause pipe underruns and other bad stuff.
         *
         * So first lets enable all pipes that do not need a fullmodeset as
         * those don't have any external dependency.
         */
        while (update_pipes) {
                for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                                    new_crtc_state, i) {
                        enum pipe pipe = crtc->pipe;

                        if ((update_pipes & BIT(pipe)) == 0)
                                continue;

                        /* Defer this pipe until the overlapping pipes have moved. */
                        if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
                                                        entries, I915_MAX_PIPES, pipe))
                                continue;

                        entries[pipe] = new_crtc_state->wm.skl.ddb;
                        update_pipes &= ~BIT(pipe);

                        intel_update_crtc(state, crtc);

                        /*
                         * If this is an already active pipe, it's DDB changed,
                         * and this isn't the last pipe that needs updating
                         * then we need to wait for a vblank to pass for the
                         * new ddb allocation to take effect.
                         */
                        if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
                                                 &old_crtc_state->wm.skl.ddb) &&
                            (update_pipes | modeset_pipes))
                                intel_wait_for_vblank(dev_priv, pipe);
                }
        }

        update_pipes = modeset_pipes;

        /*
         * Enable all pipes that needs a modeset and do not depends on other
         * pipes
         */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if ((modeset_pipes & BIT(pipe)) == 0)
                        continue;

                /* Slaves are enabled along with their masters, see below. */
                if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
                    is_trans_port_sync_slave(new_crtc_state))
                        continue;

                modeset_pipes &= ~BIT(pipe);

                if (is_trans_port_sync_mode(new_crtc_state)) {
                        const struct intel_crtc_state *new_slave_crtc_state;
                        struct intel_crtc *slave_crtc;
                        int i;

                        intel_update_trans_port_sync_crtcs(state, crtc);

                        /* Mark the port sync slaves as handled too. */
                        for_each_new_intel_crtc_in_state(state, slave_crtc,
                                                         new_slave_crtc_state, i) {
                                if (new_slave_crtc_state->master_transcoder !=
                                    new_crtc_state->cpu_transcoder)
                                        continue;

                                modeset_pipes &= ~BIT(slave_crtc->pipe);
                        }
                } else {
                        intel_enable_crtc(state, crtc);
                }
        }

        /*
         * Then we enable all remaining pipes that depend on other
         * pipes, right now it is only MST slaves as both port sync
         * slave and master are enabled together
         */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if ((modeset_pipes & BIT(pipe)) == 0)
                        continue;

                modeset_pipes &= ~BIT(pipe);

                intel_enable_crtc(state, crtc);
        }

        /*
         * Finally we do the plane updates/etc. for all pipes that got enabled.
         */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if ((update_pipes & BIT(pipe)) == 0)
                        continue;

                drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
                                                                        entries, I915_MAX_PIPES, pipe));

                entries[pipe] = new_crtc_state->wm.skl.ddb;
                update_pipes &= ~BIT(pipe);

                intel_update_crtc(state, crtc);
        }

        /* Every pipe should have been handled exactly once by now. */
        drm_WARN_ON(&dev_priv->drm, modeset_pipes);
        drm_WARN_ON(&dev_priv->drm, update_pipes);
}
15325
15326 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
15327 {
15328         struct intel_atomic_state *state, *next;
15329         struct llist_node *freed;
15330
15331         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
15332         llist_for_each_entry_safe(state, next, freed, freed)
15333                 drm_atomic_state_put(&state->base);
15334 }
15335
15336 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
15337 {
15338         struct drm_i915_private *dev_priv =
15339                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
15340
15341         intel_atomic_helper_free_state(dev_priv);
15342 }
15343
/*
 * Wait for the commit's sw_fence to signal, but also wake up early if
 * a GPU reset needing the modeset locks is pending. Both wait entries
 * must be prepared *before* checking their conditions, otherwise a
 * wakeup between the check and the schedule() would be lost.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
        struct wait_queue_entry wait_fence, wait_reset;
        struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

        init_wait_entry(&wait_fence, 0);
        init_wait_entry(&wait_reset, 0);
        for (;;) {
                prepare_to_wait(&intel_state->commit_ready.wait,
                                &wait_fence, TASK_UNINTERRUPTIBLE);
                prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
                                              I915_RESET_MODESET),
                                &wait_reset, TASK_UNINTERRUPTIBLE);


                /* Proceed once the fence is done or a modeset reset is pending. */
                if (i915_sw_fence_done(&intel_state->commit_ready) ||
                    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
                        break;

                schedule();
        }
        finish_wait(&intel_state->commit_ready.wait, &wait_fence);
        finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
                                  I915_RESET_MODESET),
                    &wait_reset);
}
15370
15371 static void intel_atomic_cleanup_work(struct work_struct *work)
15372 {
15373         struct drm_atomic_state *state =
15374                 container_of(work, struct drm_atomic_state, commit_work);
15375         struct drm_i915_private *i915 = to_i915(state->dev);
15376
15377         drm_atomic_helper_cleanup_planes(&i915->drm, state);
15378         drm_atomic_helper_commit_cleanup_done(state);
15379         drm_atomic_state_put(state);
15380
15381         intel_atomic_helper_free_state(i915);
15382 }
15383
/*
 * The tail end of an atomic commit: performs all the actual hardware
 * programming for @state. Runs inline for blocking commits or from
 * the commit worker for nonblocking ones. The overall sequence
 * (disables -> cdclk/dbuf pre -> enables -> flip wait -> post-vblank
 * optimizations -> dbuf post -> cleanup) must be preserved.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
        struct drm_device *dev = state->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
        struct intel_crtc *crtc;
        u64 put_domains[I915_MAX_PIPES] = {};
        intel_wakeref_t wakeref = 0;
        int i;

        intel_atomic_commit_fence_wait(state);

        drm_atomic_helper_wait_for_dependencies(&state->base);

        if (state->modeset)
                wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

        /* Grab power domain references for every modeset/fastset pipe. */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (needs_modeset(new_crtc_state) ||
                    new_crtc_state->update_pipe) {

                        put_domains[crtc->pipe] =
                                modeset_get_crtc_power_domains(new_crtc_state);
                }
        }

        intel_commit_modeset_disables(state);

        /* FIXME: Eventually get rid of our crtc->config pointer */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                crtc->config = new_crtc_state;

        if (state->modeset) {
                drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

                intel_set_cdclk_pre_plane_update(state);

                /*
                 * SKL workaround: bspec recommends we disable the SAGV when we
                 * have more then one pipe enabled
                 */
                if (!intel_can_enable_sagv(state))
                        intel_disable_sagv(dev_priv);

                intel_modeset_verify_disabled(dev_priv, state);
        }

        /* Complete the events for pipes that have now been disabled */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                bool modeset = needs_modeset(new_crtc_state);

                /* Complete events for now disable pipes here. */
                if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
                        spin_lock_irq(&dev->event_lock);
                        drm_crtc_send_vblank_event(&crtc->base,
                                                   new_crtc_state->uapi.event);
                        spin_unlock_irq(&dev->event_lock);

                        new_crtc_state->uapi.event = NULL;
                }
        }

        if (state->modeset)
                intel_encoders_update_prepare(state);

        /* Enable all new slices, we might need */
        if (state->modeset)
                icl_dbuf_slice_pre_update(state);

        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
        dev_priv->display.commit_modeset_enables(state);

        if (state->modeset) {
                intel_encoders_update_complete(state);

                intel_set_cdclk_post_plane_update(state);
        }

        /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
         * already, but still need the state for the delayed optimization. To
         * fix this:
         * - wrap the optimization/post_plane_update stuff into a per-crtc work.
         * - schedule that vblank worker _before_ calling hw_done
         * - at the start of commit_tail, cancel it _synchrously
         * - switch over to the vblank wait helper in the core after that since
         *   we don't need out special handling any more.
         */
        drm_atomic_helper_wait_for_flip_done(dev, &state->base);

        /* Load LUTs that were deliberately not preloaded before the flip. */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                if (new_crtc_state->hw.active &&
                    !needs_modeset(new_crtc_state) &&
                    !new_crtc_state->preload_luts &&
                    (new_crtc_state->uapi.color_mgmt_changed ||
                     new_crtc_state->update_pipe))
                        intel_color_load_luts(new_crtc_state);
        }

        /*
         * Now that the vblank has passed, we can go ahead and program the
         * optimal watermarks on platforms that need two-step watermark
         * programming.
         *
         * TODO: Move this (and other cleanup) to an async worker eventually.
         */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                /*
                 * Gen2 reports pipe underruns whenever all planes are disabled.
                 * So re-enable underrun reporting after some planes get enabled.
                 *
                 * We do this before .optimize_watermarks() so that we have a
                 * chance of catching underruns with the intermediate watermarks
                 * vs. the new plane configuration.
                 */
                if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
                        intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

                if (dev_priv->display.optimize_watermarks)
                        dev_priv->display.optimize_watermarks(state, crtc);
        }

        /* Disable all slices, we don't need */
        if (state->modeset)
                icl_dbuf_slice_post_update(state);

        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                intel_post_plane_update(state, crtc);

                /*
                 * NOTE(review): put_domains[] is filled indexed by
                 * crtc->pipe above but read with the state index i here;
                 * this relies on crtc index == pipe on i915 — confirm.
                 */
                if (put_domains[i])
                        modeset_put_power_domains(dev_priv, put_domains[i]);

                intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
        }

        /* Underruns don't always raise interrupts, so check manually */
        intel_check_cpu_fifo_underruns(dev_priv);
        intel_check_pch_fifo_underruns(dev_priv);

        if (state->modeset)
                intel_verify_planes(state);

        if (state->modeset && intel_can_enable_sagv(state))
                intel_enable_sagv(dev_priv);

        drm_atomic_helper_commit_hw_done(&state->base);

        if (state->modeset) {
                /* As one of the primary mmio accessors, KMS has a high
                 * likelihood of triggering bugs in unclaimed access. After we
                 * finish modesetting, see if an error has been flagged, and if
                 * so enable debugging for the next modeset - and hope we catch
                 * the culprit.
                 */
                intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
                intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
        }
        intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

        /*
         * Defer the cleanup of the old state to a separate worker to not
         * impede the current task (userspace for blocking modesets) that
         * are executed inline. For out-of-line asynchronous modesets/flips,
         * deferring to a new worker seems overkill, but we would place a
         * schedule point (cond_resched()) here anyway to keep latencies
         * down.
         */
        INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
        queue_work(system_highpri_wq, &state->base.commit_work);
}
15555
15556 static void intel_atomic_commit_work(struct work_struct *work)
15557 {
15558         struct intel_atomic_state *state =
15559                 container_of(work, struct intel_atomic_state, base.commit_work);
15560
15561         intel_atomic_commit_tail(state);
15562 }
15563
15564 static int __i915_sw_fence_call
15565 intel_atomic_commit_ready(struct i915_sw_fence *fence,
15566                           enum i915_sw_fence_notify notify)
15567 {
15568         struct intel_atomic_state *state =
15569                 container_of(fence, struct intel_atomic_state, commit_ready);
15570
15571         switch (notify) {
15572         case FENCE_COMPLETE:
15573                 /* we do blocking waits in the worker, nothing to do here */
15574                 break;
15575         case FENCE_FREE:
15576                 {
15577                         struct intel_atomic_helper *helper =
15578                                 &to_i915(state->base.dev)->atomic_helper;
15579
15580                         if (llist_add(&state->freed, &helper->free_list))
15581                                 schedule_work(&helper->free_work);
15582                         break;
15583                 }
15584         }
15585
15586         return NOTIFY_DONE;
15587 }
15588
15589 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
15590 {
15591         struct intel_plane_state *old_plane_state, *new_plane_state;
15592         struct intel_plane *plane;
15593         int i;
15594
15595         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
15596                                              new_plane_state, i)
15597                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
15598                                         to_intel_frontbuffer(new_plane_state->hw.fb),
15599                                         plane->frontbuffer_bit);
15600 }
15601
/*
 * Assert that every CRTC's modeset lock is held, i.e. the caller owns
 * the full global display state and may mutate it.
 */
static void assert_global_state_locked(struct drm_i915_private *dev_priv)
{
        struct intel_crtc *crtc;

        for_each_intel_crtc(&dev_priv->drm, crtc)
                drm_modeset_lock_assert_held(&crtc->base.mutex);
}
15609
/*
 * i915 implementation of drm_mode_config_funcs.atomic_commit:
 * prepares the commit, swaps in the new state and then either queues
 * the commit tail on a workqueue (nonblocking) or runs it inline.
 *
 * Returns 0 on success or a negative error code; on failure the
 * fence is committed and all acquired references are released, so no
 * hardware state has changed.
 */
static int intel_atomic_commit(struct drm_device *dev,
                               struct drm_atomic_state *_state,
                               bool nonblock)
{
        struct intel_atomic_state *state = to_intel_atomic_state(_state);
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;

        state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        drm_atomic_state_get(&state->base);
        i915_sw_fence_init(&state->commit_ready,
                           intel_atomic_commit_ready);

        /*
         * The intel_legacy_cursor_update() fast path takes care
         * of avoiding the vblank waits for simple cursor
         * movement and flips. For cursor on/off and size changes,
         * we want to perform the vblank waits so that watermark
         * updates happen during the correct frames. Gen9+ have
         * double buffered watermarks and so shouldn't need this.
         *
         * Unset state->legacy_cursor_update before the call to
         * drm_atomic_helper_setup_commit() because otherwise
         * drm_atomic_helper_wait_for_flip_done() is a noop and
         * we get FIFO underruns because we didn't wait
         * for vblank.
         *
         * FIXME doing watermarks and fb cleanup from a vblank worker
         * (assuming we had any) would solve these problems.
         */
        if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
                struct intel_crtc_state *new_crtc_state;
                struct intel_crtc *crtc;
                int i;

                for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                        if (new_crtc_state->wm.need_postvbl_update ||
                            new_crtc_state->update_wm_post)
                                state->base.legacy_cursor_update = false;
        }

        ret = intel_atomic_prepare_commit(state);
        if (ret) {
                drm_dbg_atomic(&dev_priv->drm,
                               "Preparing state failed with %i\n", ret);
                i915_sw_fence_commit(&state->commit_ready);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
        }

        ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
        if (!ret)
                ret = drm_atomic_helper_swap_state(&state->base, true);
        if (!ret)
                intel_atomic_swap_global_state(state);

        if (ret) {
                /* Commit the fence so the FENCE_FREE path can reclaim the state. */
                i915_sw_fence_commit(&state->commit_ready);

                drm_atomic_helper_cleanup_planes(dev, &state->base);
                intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
                return ret;
        }
        dev_priv->wm.distrust_bios_wm = false;
        intel_shared_dpll_swap_state(state);
        intel_atomic_track_fbs(state);

        /* Publishing global state requires all CRTC locks (asserted below). */
        if (state->global_state_changed) {
                assert_global_state_locked(dev_priv);

                dev_priv->active_pipes = state->active_pipes;
        }

        drm_atomic_state_get(&state->base);
        INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

        i915_sw_fence_commit(&state->commit_ready);
        if (nonblock && state->modeset) {
                queue_work(dev_priv->modeset_wq, &state->base.commit_work);
        } else if (nonblock) {
                queue_work(dev_priv->flip_wq, &state->base.commit_work);
        } else {
                /* Blocking modesets must not overtake queued nonblocking ones. */
                if (state->modeset)
                        flush_workqueue(dev_priv->modeset_wq);
                intel_atomic_commit_tail(state);
        }

        return 0;
}
15700
/*
 * Bookkeeping for a deferred RPS boost: a vblank waitqueue entry that,
 * when the vblank fires, may boost the GPU frequency for @request
 * (see do_rps_boost()). Holds a vblank reference on @crtc and a
 * reference on @request until the callback runs.
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;

	struct drm_crtc *crtc;
	struct i915_request *request;
};
15707
15708 static int do_rps_boost(struct wait_queue_entry *_wait,
15709                         unsigned mode, int sync, void *key)
15710 {
15711         struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
15712         struct i915_request *rq = wait->request;
15713
15714         /*
15715          * If we missed the vblank, but the request is already running it
15716          * is reasonable to assume that it will complete before the next
15717          * vblank without our intervention, so leave RPS alone.
15718          */
15719         if (!i915_request_started(rq))
15720                 intel_rps_boost(rq);
15721         i915_request_put(rq);
15722
15723         drm_crtc_vblank_put(wait->crtc);
15724
15725         list_del(&wait->wait.entry);
15726         kfree(wait);
15727         return 1;
15728 }
15729
15730 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
15731                                        struct dma_fence *fence)
15732 {
15733         struct wait_rps_boost *wait;
15734
15735         if (!dma_fence_is_i915(fence))
15736                 return;
15737
15738         if (INTEL_GEN(to_i915(crtc->dev)) < 6)
15739                 return;
15740
15741         if (drm_crtc_vblank_get(crtc))
15742                 return;
15743
15744         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
15745         if (!wait) {
15746                 drm_crtc_vblank_put(crtc);
15747                 return;
15748         }
15749
15750         wait->request = to_request(dma_fence_get(fence));
15751         wait->crtc = crtc;
15752
15753         wait->wait.func = do_rps_boost;
15754         wait->wait.flags = 0;
15755
15756         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
15757 }
15758
15759 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
15760 {
15761         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
15762         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15763         struct drm_framebuffer *fb = plane_state->hw.fb;
15764         struct i915_vma *vma;
15765
15766         if (plane->id == PLANE_CURSOR &&
15767             INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
15768                 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15769                 const int align = intel_cursor_alignment(dev_priv);
15770                 int err;
15771
15772                 err = i915_gem_object_attach_phys(obj, align);
15773                 if (err)
15774                         return err;
15775         }
15776
15777         vma = intel_pin_and_fence_fb_obj(fb,
15778                                          &plane_state->view,
15779                                          intel_plane_uses_fence(plane_state),
15780                                          &plane_state->flags);
15781         if (IS_ERR(vma))
15782                 return PTR_ERR(vma);
15783
15784         plane_state->vma = vma;
15785
15786         return 0;
15787 }
15788
15789 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
15790 {
15791         struct i915_vma *vma;
15792
15793         vma = fetch_and_zero(&old_plane_state->vma);
15794         if (vma)
15795                 intel_unpin_fb_vma(vma, old_plane_state->flags);
15796 }
15797
/*
 * Bump the scheduling priority of work @obj is waiting on to
 * I915_USER_PRIORITY(I915_PRIORITY_DISPLAY) via
 * i915_gem_object_wait_priority(), so rendering destined for scanout
 * is not queued behind lower-priority work.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
15806
15807 /**
15808  * intel_prepare_plane_fb - Prepare fb for usage on plane
15809  * @_plane: drm plane to prepare for
15810  * @_new_plane_state: the plane state being prepared
15811  *
15812  * Prepares a framebuffer for usage on a display plane.  Generally this
15813  * involves pinning the underlying object and updating the frontbuffer tracking
15814  * bits.  Some older platforms need special physical address handling for
15815  * cursor planes.
15816  *
15817  * Returns 0 on success, negative error code on failure.
15818  */
15819 int
15820 intel_prepare_plane_fb(struct drm_plane *_plane,
15821                        struct drm_plane_state *_new_plane_state)
15822 {
15823         struct intel_plane *plane = to_intel_plane(_plane);
15824         struct intel_plane_state *new_plane_state =
15825                 to_intel_plane_state(_new_plane_state);
15826         struct intel_atomic_state *state =
15827                 to_intel_atomic_state(new_plane_state->uapi.state);
15828         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15829         const struct intel_plane_state *old_plane_state =
15830                 intel_atomic_get_old_plane_state(state, plane);
15831         struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
15832         struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
15833         int ret;
15834
15835         if (old_obj) {
15836                 const struct intel_crtc_state *crtc_state =
15837                         intel_atomic_get_new_crtc_state(state,
15838                                                         to_intel_crtc(old_plane_state->hw.crtc));
15839
15840                 /* Big Hammer, we also need to ensure that any pending
15841                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
15842                  * current scanout is retired before unpinning the old
15843                  * framebuffer. Note that we rely on userspace rendering
15844                  * into the buffer attached to the pipe they are waiting
15845                  * on. If not, userspace generates a GPU hang with IPEHR
15846                  * point to the MI_WAIT_FOR_EVENT.
15847                  *
15848                  * This should only fail upon a hung GPU, in which case we
15849                  * can safely continue.
15850                  */
15851                 if (needs_modeset(crtc_state)) {
15852                         ret = i915_sw_fence_await_reservation(&state->commit_ready,
15853                                                               old_obj->base.resv, NULL,
15854                                                               false, 0,
15855                                                               GFP_KERNEL);
15856                         if (ret < 0)
15857                                 return ret;
15858                 }
15859         }
15860
15861         if (new_plane_state->uapi.fence) { /* explicit fencing */
15862                 ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
15863                                                     new_plane_state->uapi.fence,
15864                                                     I915_FENCE_TIMEOUT,
15865                                                     GFP_KERNEL);
15866                 if (ret < 0)
15867                         return ret;
15868         }
15869
15870         if (!obj)
15871                 return 0;
15872
15873         ret = i915_gem_object_pin_pages(obj);
15874         if (ret)
15875                 return ret;
15876
15877         ret = intel_plane_pin_fb(new_plane_state);
15878
15879         i915_gem_object_unpin_pages(obj);
15880         if (ret)
15881                 return ret;
15882
15883         fb_obj_bump_render_priority(obj);
15884         i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
15885
15886         if (!new_plane_state->uapi.fence) { /* implicit fencing */
15887                 struct dma_fence *fence;
15888
15889                 ret = i915_sw_fence_await_reservation(&state->commit_ready,
15890                                                       obj->base.resv, NULL,
15891                                                       false, I915_FENCE_TIMEOUT,
15892                                                       GFP_KERNEL);
15893                 if (ret < 0)
15894                         goto unpin_fb;
15895
15896                 fence = dma_resv_get_excl_rcu(obj->base.resv);
15897                 if (fence) {
15898                         add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15899                                                    fence);
15900                         dma_fence_put(fence);
15901                 }
15902         } else {
15903                 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15904                                            new_plane_state->uapi.fence);
15905         }
15906
15907         /*
15908          * We declare pageflips to be interactive and so merit a small bias
15909          * towards upclocking to deliver the frame on time. By only changing
15910          * the RPS thresholds to sample more regularly and aim for higher
15911          * clocks we can hopefully deliver low power workloads (like kodi)
15912          * that are not quite steady state without resorting to forcing
15913          * maximum clocks following a vblank miss (see do_rps_boost()).
15914          */
15915         if (!state->rps_interactive) {
15916                 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
15917                 state->rps_interactive = true;
15918         }
15919
15920         return 0;
15921
15922 unpin_fb:
15923         intel_plane_unpin_fb(new_plane_state);
15924
15925         return ret;
15926 }
15927
15928 /**
15929  * intel_cleanup_plane_fb - Cleans up an fb after plane use
15930  * @plane: drm plane to clean up for
15931  * @_old_plane_state: the state from the previous modeset
15932  *
15933  * Cleans up a framebuffer that has just been removed from a plane.
15934  */
15935 void
15936 intel_cleanup_plane_fb(struct drm_plane *plane,
15937                        struct drm_plane_state *_old_plane_state)
15938 {
15939         struct intel_plane_state *old_plane_state =
15940                 to_intel_plane_state(_old_plane_state);
15941         struct intel_atomic_state *state =
15942                 to_intel_atomic_state(old_plane_state->uapi.state);
15943         struct drm_i915_private *dev_priv = to_i915(plane->dev);
15944         struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
15945
15946         if (!obj)
15947                 return;
15948
15949         if (state->rps_interactive) {
15950                 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
15951                 state->rps_interactive = false;
15952         }
15953
15954         /* Should only be called after a successful intel_prepare_plane_fb()! */
15955         intel_plane_unpin_fb(old_plane_state);
15956 }
15957
15958 /**
15959  * intel_plane_destroy - destroy a plane
15960  * @plane: plane to destroy
15961  *
15962  * Common destruction function for all types of planes (primary, cursor,
15963  * sprite).
15964  */
15965 void intel_plane_destroy(struct drm_plane *plane)
15966 {
15967         drm_plane_cleanup(plane);
15968         kfree(to_intel_plane(plane));
15969 }
15970
15971 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
15972                                             u32 format, u64 modifier)
15973 {
15974         switch (modifier) {
15975         case DRM_FORMAT_MOD_LINEAR:
15976         case I915_FORMAT_MOD_X_TILED:
15977                 break;
15978         default:
15979                 return false;
15980         }
15981
15982         switch (format) {
15983         case DRM_FORMAT_C8:
15984         case DRM_FORMAT_RGB565:
15985         case DRM_FORMAT_XRGB1555:
15986         case DRM_FORMAT_XRGB8888:
15987                 return modifier == DRM_FORMAT_MOD_LINEAR ||
15988                         modifier == I915_FORMAT_MOD_X_TILED;
15989         default:
15990                 return false;
15991         }
15992 }
15993
15994 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
15995                                             u32 format, u64 modifier)
15996 {
15997         switch (modifier) {
15998         case DRM_FORMAT_MOD_LINEAR:
15999         case I915_FORMAT_MOD_X_TILED:
16000                 break;
16001         default:
16002                 return false;
16003         }
16004
16005         switch (format) {
16006         case DRM_FORMAT_C8:
16007         case DRM_FORMAT_RGB565:
16008         case DRM_FORMAT_XRGB8888:
16009         case DRM_FORMAT_XBGR8888:
16010         case DRM_FORMAT_ARGB8888:
16011         case DRM_FORMAT_ABGR8888:
16012         case DRM_FORMAT_XRGB2101010:
16013         case DRM_FORMAT_XBGR2101010:
16014         case DRM_FORMAT_ARGB2101010:
16015         case DRM_FORMAT_ABGR2101010:
16016         case DRM_FORMAT_XBGR16161616F:
16017                 return modifier == DRM_FORMAT_MOD_LINEAR ||
16018                         modifier == I915_FORMAT_MOD_X_TILED;
16019         default:
16020                 return false;
16021         }
16022 }
16023
16024 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
16025                                               u32 format, u64 modifier)
16026 {
16027         return modifier == DRM_FORMAT_MOD_LINEAR &&
16028                 format == DRM_FORMAT_ARGB8888;
16029 }
16030
/* drm_plane_funcs for gen4+ primary planes (see intel_primary_plane_create()). */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
16039
/* drm_plane_funcs for pre-gen4 primary planes (see intel_primary_plane_create()). */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
16048
/*
 * Legacy cursor ioctl fastpath: update just this cursor plane without
 * building a full atomic commit, avoiding vblank waits for simple
 * cursor moves/flips. Falls back to the atomic slowpath
 * (drm_atomic_helper_update_plane()) whenever the update could affect
 * anything beyond the cursor's fb and position.
 */
static int
intel_legacy_cursor_update(struct drm_plane *_plane,
			   struct drm_crtc *_crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->base.state);
	struct intel_plane_state *new_plane_state;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_crtc_state *new_crtc_state;
	int ret;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
	    crtc_state->update_pipe)
		goto slow;

	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->uapi.commit &&
	    !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->uapi.crtc != &crtc->base ||
	    old_plane_state->uapi.src_w != src_w ||
	    old_plane_state->uapi.src_h != src_h ||
	    old_plane_state->uapi.crtc_w != crtc_w ||
	    old_plane_state->uapi.crtc_h != crtc_h ||
	    !old_plane_state->uapi.fb != !fb)
		goto slow;

	/* Duplicate the current states so they can be modified freely. */
	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
	if (!new_plane_state)
		return -ENOMEM;

	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);

	/* Fill in the new uapi coordinates from the ioctl arguments. */
	new_plane_state->uapi.src_x = src_x;
	new_plane_state->uapi.src_y = src_y;
	new_plane_state->uapi.src_w = src_w;
	new_plane_state->uapi.src_h = src_h;
	new_plane_state->uapi.crtc_x = crtc_x;
	new_plane_state->uapi.crtc_y = crtc_y;
	new_plane_state->uapi.crtc_w = crtc_w;
	new_plane_state->uapi.crtc_h = crtc_h;

	intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);

	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  old_plane_state, new_plane_state);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(new_plane_state);
	if (ret)
		goto out_free;

	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
				ORIGIN_FLIP);
	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
				to_intel_frontbuffer(new_plane_state->hw.fb),
				plane->frontbuffer_bit);

	/* Swap plane state */
	plane->base.state = &new_plane_state->uapi;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (new_plane_state->uapi.visible)
		intel_update_plane(plane, crtc_state, new_plane_state);
	else
		intel_disable_plane(plane, crtc_state);

	intel_plane_unpin_fb(old_plane_state);

out_free:
	/* On failure free the new state, on success free the replaced state. */
	if (new_crtc_state)
		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
	if (ret)
		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
	else
		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
	return ret;

slow:
	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
16173
/* drm_plane_funcs for cursor planes; update_plane uses the legacy fastpath. */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
16182
16183 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
16184                                enum i9xx_plane_id i9xx_plane)
16185 {
16186         if (!HAS_FBC(dev_priv))
16187                 return false;
16188
16189         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
16190                 return i9xx_plane == PLANE_A; /* tied to pipe A */
16191         else if (IS_IVYBRIDGE(dev_priv))
16192                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
16193                         i9xx_plane == PLANE_C;
16194         else if (INTEL_GEN(dev_priv) >= 4)
16195                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
16196         else
16197                 return i9xx_plane == PLANE_A;
16198 }
16199
/*
 * Create and initialize the primary plane for @pipe. Gen9+ is handled
 * entirely by skl_universal_plane_create(); older platforms select
 * formats, plane funcs, min_cdclk hooks and rotation support based on
 * platform generation here.
 *
 * Returns the new plane or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *plane;
	const struct drm_plane_funcs *plane_funcs;
	unsigned int supported_rotations;
	const u32 *formats;
	int num_formats;
	int ret, zpos;

	if (INTEL_GEN(dev_priv) >= 9)
		return skl_universal_plane_create(dev_priv, pipe,
						  PLANE_PRIMARY);

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return plane;

	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
	plane->id = PLANE_PRIMARY;
	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

	/* Let FBC know which frontbuffer bits it may be able to compress. */
	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
	if (plane->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
	}

	/* Pick the pixel format list for this platform. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		formats = vlv_primary_formats;
		num_formats = ARRAY_SIZE(vlv_primary_formats);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		/*
		 * WaFP16GammaEnabling:ivb
		 * "Workaround : When using the 64-bit format, the plane
		 *  output on each color channel has one quarter amplitude.
		 *  It can be brought up to full amplitude by using pipe
		 *  gamma correction or pipe color space conversion to
		 *  multiply the plane output by four."
		 *
		 * There is no dedicated plane gamma for the primary plane,
		 * and using the pipe gamma/csc could conflict with other
		 * planes, so we choose not to expose fp16 on IVB primary
		 * planes. HSW primary planes no longer have this problem.
		 */
		if (IS_IVYBRIDGE(dev_priv)) {
			formats = ivb_primary_formats;
			num_formats = ARRAY_SIZE(ivb_primary_formats);
		} else {
			formats = i965_primary_formats;
			num_formats = ARRAY_SIZE(i965_primary_formats);
		}
	} else {
		formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
	}

	if (INTEL_GEN(dev_priv) >= 4)
		plane_funcs = &i965_plane_funcs;
	else
		plane_funcs = &i8xx_plane_funcs;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		plane->min_cdclk = vlv_plane_min_cdclk;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		plane->min_cdclk = hsw_plane_min_cdclk;
	else if (IS_IVYBRIDGE(dev_priv))
		plane->min_cdclk = ivb_plane_min_cdclk;
	else
		plane->min_cdclk = i9xx_plane_min_cdclk;

	plane->max_stride = i9xx_plane_max_stride;
	plane->update_plane = i9xx_update_plane;
	plane->disable_plane = i9xx_disable_plane;
	plane->get_hw_state = i9xx_plane_get_hw_state;
	plane->check_plane = i9xx_plane_check;

	/* The DRM plane name differs by generation ("primary" vs "plane"). */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       0, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       0, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(plane->i9xx_plane));
	if (ret)
		goto fail;

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&plane->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	/* The primary plane sits at the bottom of the zpos stack. */
	zpos = 0;
	drm_plane_create_zpos_immutable_property(&plane->base, zpos);

	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

	return plane;

fail:
	intel_plane_free(plane);

	return ERR_PTR(ret);
}
16332
/*
 * Create and initialize the cursor plane for @pipe, selecting the
 * i845/i865 or i9xx cursor hooks based on platform.
 *
 * Returns the new plane or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	struct intel_plane *cursor;
	int ret, zpos;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/* ~0 is used as an "unset" sentinel for these tracked values. */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       0, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	/* Cursor stacks above all sprite planes in the fixed zpos order. */
	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}
16397
/*
 * drm_crtc_funcs entries shared by all platforms; the per-platform
 * tables below add only the vblank hooks, which are the sole
 * members that differ between them.
 */
#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources
16408
/* Non-GMCH, gen8+ — selected in intel_crtc_init(). */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
};

/* Non-GMCH, gen < 8. */
static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
};

/* GMCH: G4X, VLV and CHV. */
static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};

/* GMCH gen4 other than G4X. */
static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};

/* i915GM/i945GM. */
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
};

/* Remaining gen3 GMCH platforms. */
static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};

/* gen2. */
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};
16464
16465 static struct intel_crtc *intel_crtc_alloc(void)
16466 {
16467         struct intel_crtc_state *crtc_state;
16468         struct intel_crtc *crtc;
16469
16470         crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
16471         if (!crtc)
16472                 return ERR_PTR(-ENOMEM);
16473
16474         crtc_state = intel_crtc_state_alloc(crtc);
16475         if (!crtc_state) {
16476                 kfree(crtc);
16477                 return ERR_PTR(-ENOMEM);
16478         }
16479
16480         crtc->base.state = &crtc_state->uapi;
16481         crtc->config = crtc_state;
16482
16483         return crtc;
16484 }
16485
/*
 * Free a crtc allocated with intel_crtc_alloc(), releasing the
 * atomic state still attached to it.
 */
static void intel_crtc_free(struct intel_crtc *crtc)
{
	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
	kfree(crtc);
}
16491
16492 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
16493 {
16494         struct intel_plane *plane;
16495
16496         for_each_intel_plane(&dev_priv->drm, plane) {
16497                 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
16498                                                                   plane->pipe);
16499
16500                 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
16501         }
16502 }
16503
/*
 * Create and register the CRTC for @pipe, along with its primary
 * plane, any sprite planes and the cursor plane, and select the
 * drm_crtc_funcs table with the right vblank hooks for this
 * platform.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary, *cursor;
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *crtc;
	int sprite, ret;

	crtc = intel_crtc_alloc();
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	crtc->pipe = pipe;
	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(cursor->id);

	/* The tables only differ in their vblank hooks. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (IS_GEN(dev_priv, 4))
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (IS_GEN(dev_priv, 3))
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (INTEL_GEN(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/* Each pipe must map to exactly one crtc. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;

	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		/* Pre-gen9 also tracks the primary plane -> crtc mapping. */
		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
	}

	intel_color_init(crtc);

	intel_crtc_crc_init(crtc);

	drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);

	return 0;

fail:
	intel_crtc_free(crtc);

	return ret;
}
16593
16594 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
16595                                       struct drm_file *file)
16596 {
16597         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
16598         struct drm_crtc *drmmode_crtc;
16599         struct intel_crtc *crtc;
16600
16601         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
16602         if (!drmmode_crtc)
16603                 return -ENOENT;
16604
16605         crtc = to_intel_crtc(drmmode_crtc);
16606         pipe_from_crtc_id->pipe = crtc->pipe;
16607
16608         return 0;
16609 }
16610
16611 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
16612 {
16613         struct drm_device *dev = encoder->base.dev;
16614         struct intel_encoder *source_encoder;
16615         u32 possible_clones = 0;
16616
16617         for_each_intel_encoder(dev, source_encoder) {
16618                 if (encoders_cloneable(encoder, source_encoder))
16619                         possible_clones |= drm_encoder_mask(&source_encoder->base);
16620         }
16621
16622         return possible_clones;
16623 }
16624
16625 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
16626 {
16627         struct drm_device *dev = encoder->base.dev;
16628         struct intel_crtc *crtc;
16629         u32 possible_crtcs = 0;
16630
16631         for_each_intel_crtc(dev, crtc) {
16632                 if (encoder->pipe_mask & BIT(crtc->pipe))
16633                         possible_crtcs |= drm_crtc_mask(&crtc->base);
16634         }
16635
16636         return possible_crtcs;
16637 }
16638
16639 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
16640 {
16641         if (!IS_MOBILE(dev_priv))
16642                 return false;
16643
16644         if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
16645                 return false;
16646
16647         if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
16648                 return false;
16649
16650         return true;
16651 }
16652
16653 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
16654 {
16655         if (INTEL_GEN(dev_priv) >= 9)
16656                 return false;
16657
16658         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
16659                 return false;
16660
16661         if (HAS_PCH_LPT_H(dev_priv) &&
16662             intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
16663                 return false;
16664
16665         /* DDI E can't be used if DDI A requires 4 lanes */
16666         if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
16667                 return false;
16668
16669         if (!dev_priv->vbt.int_crt_support)
16670                 return false;
16671
16672         return true;
16673 }
16674
16675 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
16676 {
16677         int pps_num;
16678         int pps_idx;
16679
16680         if (HAS_DDI(dev_priv))
16681                 return;
16682         /*
16683          * This w/a is needed at least on CPT/PPT, but to be sure apply it
16684          * everywhere where registers can be write protected.
16685          */
16686         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16687                 pps_num = 2;
16688         else
16689                 pps_num = 1;
16690
16691         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
16692                 u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
16693
16694                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
16695                 intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
16696         }
16697 }
16698
/*
 * Select the PPS (panel power sequencer) MMIO register block for
 * this platform and apply the register-unlock workaround.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}
16710
/*
 * Probe and register all display outputs (DDI, DSI, LVDS, CRT,
 * SDVO, HDMI, DP, TV, DVO) present on this platform, then fill in
 * each encoder's possible_crtcs/possible_clones masks.
 *
 * Port presence is determined per platform branch from hardware
 * strap registers and/or the VBT, as the comments below explain.
 * Registration order matters in several branches (see the LVDS vs
 * eDP note for PCH split platforms).
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return;

	if (INTEL_GEN(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		intel_ddi_init(dev_priv, PORT_G);
		intel_ddi_init(dev_priv, PORT_H);
		intel_ddi_init(dev_priv, PORT_I);
		icl_dsi_init(dev_priv);
	} else if (IS_ELKHARTLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (IS_GEN(dev_priv, 11)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = intel_de_read(dev_priv, SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_GEN(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	intel_psr_init(dev_priv);

	/* Compute the routing masks now that all encoders exist. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
16951
16952 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
16953 {
16954         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
16955
16956         drm_framebuffer_cleanup(fb);
16957         intel_frontbuffer_put(intel_fb->frontbuffer);
16958
16959         kfree(intel_fb);
16960 }
16961
16962 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
16963                                                 struct drm_file *file,
16964                                                 unsigned int *handle)
16965 {
16966         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16967         struct drm_i915_private *i915 = to_i915(obj->base.dev);
16968
16969         if (obj->userptr.mm) {
16970                 drm_dbg(&i915->drm,
16971                         "attempting to use a userptr for a framebuffer, denied\n");
16972                 return -EINVAL;
16973         }
16974
16975         return drm_gem_handle_create(file, &obj->base, handle);
16976 }
16977
16978 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
16979                                         struct drm_file *file,
16980                                         unsigned flags, unsigned color,
16981                                         struct drm_clip_rect *clips,
16982                                         unsigned num_clips)
16983 {
16984         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16985
16986         i915_gem_object_flush_if_display(obj);
16987         intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
16988
16989         return 0;
16990 }
16991
/* Vfuncs for framebuffers created through the addfb ioctls. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
16997
/*
 * Validate @mode_cmd against tiling/modifier/format/stride
 * constraints and initialize @intel_fb around @obj.
 *
 * Returns 0 on success or a negative errno; on error the
 * frontbuffer reference acquired here is released again.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	/* Snapshot the object's tiling/stride under its lock. */
	i915_gem_object_lock(obj);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			drm_dbg_kms(&dev_priv->drm,
				    "tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* Legacy addfb: derive the modifier from the object's tiling. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		drm_dbg_kms(&dev_priv->drm,
			    "unsupported pixel format %s / modifier 0x%llx\n",
			    drm_get_format_name(mode_cmd->pixel_format,
						&format_name),
			    mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		drm_dbg_kms(&dev_priv->drm,
			    "tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s pitch (%u) must be at most %d\n",
			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			    "tiled" : "linear",
			    mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "pitch (%d) must match tiling stride (%d)\n",
			    mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "plane 0 offset (0x%08x) must be 0\n",
			    mode_cmd->offsets[0]);
		goto err;
	}

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		/* All planes must share the same backing object. */
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
				    i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);
		if (fb->pitches[i] & (stride_alignment - 1)) {
			drm_dbg_kms(&dev_priv->drm,
				    "plane %d pitch (%d) must be at least %u byte aligned\n",
				    i, fb->pitches[i], stride_alignment);
			goto err;
		}

		if (is_gen12_ccs_plane(fb, i)) {
			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

			if (fb->pitches[i] != ccs_aux_stride) {
				drm_dbg_kms(&dev_priv->drm,
					    "ccs aux plane %d pitch (%d) must be %d\n",
					    i,
					    fb->pitches[i], ccs_aux_stride);
				goto err;
			}
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}
17143
17144 static struct drm_framebuffer *
17145 intel_user_framebuffer_create(struct drm_device *dev,
17146                               struct drm_file *filp,
17147                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
17148 {
17149         struct drm_framebuffer *fb;
17150         struct drm_i915_gem_object *obj;
17151         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
17152
17153         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
17154         if (!obj)
17155                 return ERR_PTR(-ENOENT);
17156
17157         fb = intel_framebuffer_create(obj, &mode_cmd);
17158         i915_gem_object_put(obj);
17159
17160         return fb;
17161 }
17162
17163 static enum drm_mode_status
17164 intel_mode_valid(struct drm_device *dev,
17165                  const struct drm_display_mode *mode)
17166 {
17167         struct drm_i915_private *dev_priv = to_i915(dev);
17168         int hdisplay_max, htotal_max;
17169         int vdisplay_max, vtotal_max;
17170
17171         /*
17172          * Can't reject DBLSCAN here because Xorg ddxen can add piles
17173          * of DBLSCAN modes to the output's mode list when they detect
17174          * the scaling mode property on the connector. And they don't
17175          * ask the kernel to validate those modes in any way until
17176          * modeset time at which point the client gets a protocol error.
17177          * So in order to not upset those clients we silently ignore the
17178          * DBLSCAN flag on such connectors. For other connectors we will
17179          * reject modes with the DBLSCAN flag in encoder->compute_config().
17180          * And we always reject DBLSCAN modes in connector->mode_valid()
17181          * as we never want such modes on the connector's mode list.
17182          */
17183
17184         if (mode->vscan > 1)
17185                 return MODE_NO_VSCAN;
17186
17187         if (mode->flags & DRM_MODE_FLAG_HSKEW)
17188                 return MODE_H_ILLEGAL;
17189
17190         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
17191                            DRM_MODE_FLAG_NCSYNC |
17192                            DRM_MODE_FLAG_PCSYNC))
17193                 return MODE_HSYNC;
17194
17195         if (mode->flags & (DRM_MODE_FLAG_BCAST |
17196                            DRM_MODE_FLAG_PIXMUX |
17197                            DRM_MODE_FLAG_CLKDIV2))
17198                 return MODE_BAD;
17199
17200         /* Transcoder timing limits */
17201         if (INTEL_GEN(dev_priv) >= 11) {
17202                 hdisplay_max = 16384;
17203                 vdisplay_max = 8192;
17204                 htotal_max = 16384;
17205                 vtotal_max = 8192;
17206         } else if (INTEL_GEN(dev_priv) >= 9 ||
17207                    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
17208                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
17209                 vdisplay_max = 4096;
17210                 htotal_max = 8192;
17211                 vtotal_max = 8192;
17212         } else if (INTEL_GEN(dev_priv) >= 3) {
17213                 hdisplay_max = 4096;
17214                 vdisplay_max = 4096;
17215                 htotal_max = 8192;
17216                 vtotal_max = 8192;
17217         } else {
17218                 hdisplay_max = 2048;
17219                 vdisplay_max = 2048;
17220                 htotal_max = 4096;
17221                 vtotal_max = 4096;
17222         }
17223
17224         if (mode->hdisplay > hdisplay_max ||
17225             mode->hsync_start > htotal_max ||
17226             mode->hsync_end > htotal_max ||
17227             mode->htotal > htotal_max)
17228                 return MODE_H_ILLEGAL;
17229
17230         if (mode->vdisplay > vdisplay_max ||
17231             mode->vsync_start > vtotal_max ||
17232             mode->vsync_end > vtotal_max ||
17233             mode->vtotal > vtotal_max)
17234                 return MODE_V_ILLEGAL;
17235
17236         if (INTEL_GEN(dev_priv) >= 5) {
17237                 if (mode->hdisplay < 64 ||
17238                     mode->htotal - mode->hdisplay < 32)
17239                         return MODE_H_ILLEGAL;
17240
17241                 if (mode->vtotal - mode->vdisplay < 5)
17242                         return MODE_V_ILLEGAL;
17243         } else {
17244                 if (mode->htotal - mode->hdisplay < 32)
17245                         return MODE_H_ILLEGAL;
17246
17247                 if (mode->vtotal - mode->vdisplay < 3)
17248                         return MODE_V_ILLEGAL;
17249         }
17250
17251         return MODE_OK;
17252 }
17253
17254 enum drm_mode_status
17255 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
17256                                 const struct drm_display_mode *mode)
17257 {
17258         int plane_width_max, plane_height_max;
17259
17260         /*
17261          * intel_mode_valid() should be
17262          * sufficient on older platforms.
17263          */
17264         if (INTEL_GEN(dev_priv) < 9)
17265                 return MODE_OK;
17266
17267         /*
17268          * Most people will probably want a fullscreen
17269          * plane so let's not advertize modes that are
17270          * too big for that.
17271          */
17272         if (INTEL_GEN(dev_priv) >= 11) {
17273                 plane_width_max = 5120;
17274                 plane_height_max = 4320;
17275         } else {
17276                 plane_width_max = 5120;
17277                 plane_height_max = 4096;
17278         }
17279
17280         if (mode->hdisplay > plane_width_max)
17281                 return MODE_H_ILLEGAL;
17282
17283         if (mode->vdisplay > plane_height_max)
17284                 return MODE_V_ILLEGAL;
17285
17286         return MODE_OK;
17287 }
17288
/*
 * drm_mode_config function table: routes DRM core mode-setting entry points
 * (fb creation, mode validation, atomic check/commit) to the i915
 * implementations defined in this file and elsewhere in the driver.
 */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	/* i915 subclasses drm_atomic_state, hence custom alloc/clear/free */
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
17300
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Selects the per-platform implementations of the pipe config readout,
 * initial (BIOS) plane config readout, pixel clock computation, crtc
 * enable/disable, FDI link training and modeset-enable commit hooks.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	if (INTEL_GEN(dev_priv) >= 9) {
		/* gen9+ (skl and later): hsw-style pipes, skl universal planes */
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skl_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		/* other DDI platforms (hsw/bdw): pre-skl primary planes */
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			hsw_crtc_compute_clock;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* ilk/snb/ivb: PCH-based display */
		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ilk_crtc_compute_clock;
		dev_priv->display.crtc_enable = ilk_crtc_enable;
		dev_priv->display.crtc_disable = ilk_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN(dev_priv, 2)) {
		/* remaining gen3/gen4 parts */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		/* gen2 (i8xx) */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* FDI link training is only present on ilk/snb/ivb */
	if (IS_GEN(dev_priv, 5)) {
		dev_priv->display.fdi_link_train = ilk_fdi_link_train;
	} else if (IS_GEN(dev_priv, 6)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
	else
		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;

}
17391
/*
 * Read the current cdclk configuration from the hardware and seed the
 * software cdclk state (both logical and actual) from that readout.
 */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(i915->cdclk.obj.state);

	intel_update_cdclk(i915);
	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
	/* sw state starts out matching whatever the hw is doing right now */
	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
}
17401
17402 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
17403 {
17404         struct drm_plane *plane;
17405         struct drm_crtc *crtc;
17406
17407         drm_for_each_crtc(crtc, state->dev) {
17408                 struct drm_crtc_state *crtc_state;
17409
17410                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
17411                 if (IS_ERR(crtc_state))
17412                         return PTR_ERR(crtc_state);
17413         }
17414
17415         drm_for_each_plane(plane, state->dev) {
17416                 struct drm_plane_state *plane_state;
17417
17418                 plane_state = drm_atomic_get_plane_state(state, plane);
17419                 if (IS_ERR(plane_state))
17420                         return PTR_ERR(plane_state);
17421         }
17422
17423         return 0;
17424 }
17425
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	/* Recompute watermarks for every crtc/plane in the device. */
	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, crtc);

		/* Mirror the computed wm into the committed crtc state. */
		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	/* Standard modeset-lock deadlock dance: clear, back off, retry. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements.  This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform.  Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
17511
17512 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
17513 {
17514         if (IS_GEN(dev_priv, 5)) {
17515                 u32 fdi_pll_clk =
17516                         intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
17517
17518                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
17519         } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
17520                 dev_priv->fdi_pll_freq = 270000;
17521         } else {
17522                 return;
17523         }
17524
17525         drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
17526 }
17527
/*
 * Commit the state read out of the hardware at boot so that all active
 * planes get fully computed software state before the first userspace
 * modeset.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		/* Only crtcs left enabled by the BIOS need the treatment. */
		if (crtc_state->hw.active) {
			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * FIXME hack to force full modeset when DSC is being
			 * used.
			 *
			 * As long as we do not have full state readout and
			 * config comparison of crtc_state->dsc, we have no way
			 * to ensure reliable fastset. Remove once we have
			 * readout for DSC.
			 */
			if (crtc_state->dsc.compression_enable) {
				ret = drm_atomic_add_affected_connectors(state,
									 &crtc->base);
				if (ret)
					goto out;
				crtc_state->uapi.mode_changed = true;
				drm_dbg_kms(dev, "Force full modeset for DSC\n");
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Standard modeset-lock deadlock dance: clear, back off, retry. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
17602
17603 static void intel_mode_config_init(struct drm_i915_private *i915)
17604 {
17605         struct drm_mode_config *mode_config = &i915->drm.mode_config;
17606
17607         drm_mode_config_init(&i915->drm);
17608         INIT_LIST_HEAD(&i915->global_obj_list);
17609
17610         mode_config->min_width = 0;
17611         mode_config->min_height = 0;
17612
17613         mode_config->preferred_depth = 24;
17614         mode_config->prefer_shadow = 1;
17615
17616         mode_config->allow_fb_modifiers = true;
17617
17618         mode_config->funcs = &intel_mode_funcs;
17619
17620         /*
17621          * Maximum framebuffer dimensions, chosen to match
17622          * the maximum render engine surface size on gen4+.
17623          */
17624         if (INTEL_GEN(i915) >= 7) {
17625                 mode_config->max_width = 16384;
17626                 mode_config->max_height = 16384;
17627         } else if (INTEL_GEN(i915) >= 4) {
17628                 mode_config->max_width = 8192;
17629                 mode_config->max_height = 8192;
17630         } else if (IS_GEN(i915, 3)) {
17631                 mode_config->max_width = 4096;
17632                 mode_config->max_height = 4096;
17633         } else {
17634                 mode_config->max_width = 2048;
17635                 mode_config->max_height = 2048;
17636         }
17637
17638         if (IS_I845G(i915) || IS_I865G(i915)) {
17639                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
17640                 mode_config->cursor_height = 1023;
17641         } else if (IS_GEN(i915, 2)) {
17642                 mode_config->cursor_width = 64;
17643                 mode_config->cursor_height = 64;
17644         } else {
17645                 mode_config->cursor_width = 256;
17646                 mode_config->cursor_height = 256;
17647         }
17648 }
17649
/* Tear down what intel_mode_config_init() set up. */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
17655
17656 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
17657 {
17658         if (plane_config->fb) {
17659                 struct drm_framebuffer *fb = &plane_config->fb->base;
17660
17661                 /* We may only have the stub and not a full framebuffer */
17662                 if (drm_framebuffer_read_refcount(fb))
17663                         drm_framebuffer_put(fb);
17664                 else
17665                         kfree(fb);
17666         }
17667
17668         if (plane_config->vma)
17669                 i915_vma_put(plane_config->vma);
17670 }
17671
17672 /* part #1: call before irq install */
17673 int intel_modeset_init_noirq(struct drm_i915_private *i915)
17674 {
17675         int ret;
17676
17677         i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
17678         i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
17679                                         WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
17680
17681         intel_mode_config_init(i915);
17682
17683         ret = intel_cdclk_init(i915);
17684         if (ret)
17685                 return ret;
17686
17687         ret = intel_bw_init(i915);
17688         if (ret)
17689                 return ret;
17690
17691         init_llist_head(&i915->atomic_helper.free_list);
17692         INIT_WORK(&i915->atomic_helper.free_work,
17693                   intel_atomic_helper_free_state_worker);
17694
17695         intel_init_quirks(i915);
17696
17697         intel_fbc_init(i915);
17698
17699         return 0;
17700 }
17701
/*
 * part #2: call after irq install
 *
 * Creates the crtcs and outputs, reads the BIOS-programmed hardware state
 * back into software state, reconstructs the BIOS framebuffer, sanitizes
 * watermarks and performs the initial commit.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				/* undo the partial mode config setup */
				intel_mode_config_cleanup(i915);
				return ret;
			}
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Take over the state the BIOS left the hardware in. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);

		plane_config_fini(&plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(dev);
	if (ret)
		drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n");

	/* An initial commit failure is logged but deliberately not fatal. */
	return 0;
}
17794
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60 timing, for the
 * quirk that requires a pipe to be running. Programs the DPLL dividers,
 * transcoder timings and pipe source size directly via register writes.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity-check that the fixed dividers produce the expected dotclock. */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/* Fixed 640x480 timings (values are "size - 1" per hw convention). */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* Confirm the pipe actually started scanning out. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
17867
/*
 * Disable a pipe that was force-enabled by the i830 quirk: turn off the
 * transcoder, wait for the scanline to stop, then shut down the DPLL.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	/* All planes and cursors are expected to be off already. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* The pipe must stop scanning out before the DPLL is shut down. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
17897
17898 static void
17899 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
17900 {
17901         struct intel_crtc *crtc;
17902
17903         if (INTEL_GEN(dev_priv) >= 4)
17904                 return;
17905
17906         for_each_intel_crtc(&dev_priv->drm, crtc) {
17907                 struct intel_plane *plane =
17908                         to_intel_plane(crtc->base.primary);
17909                 struct intel_crtc *plane_crtc;
17910                 enum pipe pipe;
17911
17912                 if (!plane->get_hw_state(plane, &pipe))
17913                         continue;
17914
17915                 if (pipe == crtc->pipe)
17916                         continue;
17917
17918                 drm_dbg_kms(&dev_priv->drm,
17919                             "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
17920                             plane->base.base.id, plane->base.name);
17921
17922                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17923                 intel_plane_disable_noatomic(plane_crtc, plane);
17924         }
17925 }
17926
17927 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
17928 {
17929         struct drm_device *dev = crtc->base.dev;
17930         struct intel_encoder *encoder;
17931
17932         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
17933                 return true;
17934
17935         return false;
17936 }
17937
17938 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
17939 {
17940         struct drm_device *dev = encoder->base.dev;
17941         struct intel_connector *connector;
17942
17943         for_each_connector_on_encoder(dev, &encoder->base, connector)
17944                 return connector;
17945
17946         return NULL;
17947 }
17948
17949 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
17950                               enum pipe pch_transcoder)
17951 {
17952         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
17953                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
17954 }
17955
/*
 * Reset the frame start delay to 0 for both the CPU transcoder and, if
 * a PCH encoder is in use, the PCH transcoder. The BIOS may leave a
 * non-zero delay programmed (see the "frame start delays used for
 * debugging" note at the caller); our code expects a delay of 0.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		/* HSW+ keep the delay in the per-transcoder chicken reg. */
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* DSI transcoders are not handled here. */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = intel_de_read(dev_priv, reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* Older platforms keep the delay in PIPECONF. */
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	/* The PCH transcoder needs the same treatment, but only when a
	 * PCH encoder is actually driven by this crtc. */
	if (!crtc_state->has_pch_encoder)
		return;

	if (HAS_PCH_IBX(dev_priv)) {
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* Non-IBX PCHs keep the delay in TRANS_CHICKEN2. */
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}
}
18006
/*
 * Bring a single crtc's hardware/software state into a consistent shape
 * after hardware state readout: clear BIOS leftovers on active crtcs,
 * turn the pipe off if no encoder is using it, and initialize the FIFO
 * underrun bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
18072
18073 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
18074 {
18075         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
18076
18077         /*
18078          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
18079          * the hardware when a high res displays plugged in. DPLL P
18080          * divider is zero, and the pipe timings are bonkers. We'll
18081          * try to disable everything in that case.
18082          *
18083          * FIXME would be nice to be able to sanitize this state
18084          * without several WARNs, but for now let's take the easy
18085          * road.
18086          */
18087         return IS_GEN(dev_priv, 6) &&
18088                 crtc_state->hw.active &&
18089                 crtc_state->shared_dpll &&
18090                 crtc_state->port_clock == 0;
18091 }
18092
/*
 * Sanitize a single encoder after hardware state readout: if the
 * encoder claims active connectors but has no active pipe (typically
 * fallout from resume register restore, or a bogus SNB DPLL config),
 * manually disable it and clamp the connector state to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			/* restore the saved best_encoder */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
18163
/* FIXME read out full plane state for all planes */
/*
 * Read the enabled/pipe state for every plane from the hardware and
 * record the visibility in the plane/crtc states, then fix up each
 * crtc's active_planes bookkeeping to match.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		/* default in case get_hw_state() doesn't set it */
		enum pipe pipe = PIPE_A;
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		/* Attach the plane to whichever pipe it's scanning out on. */
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			    plane->base.base.id, plane->base.name,
			    enableddisabled(visible), pipe_name(pipe));
	}

	/* Second pass: make active_planes consistent with the readout. */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_active_planes(crtc_state);
	}
}
18197
/*
 * Read the full display hardware state (crtcs, planes, DPLLs, encoders,
 * connectors) into the software state objects, and derive the dependent
 * state (modes, pixel rate, cdclk/voltage minimums, bandwidth) for each
 * active crtc. This is the "readout" half; sanitization happens in
 * intel_modeset_setup_hw_state().
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 active_pipes = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Start from a pristine crtc state before reading out. */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		crtc_state->hw.active = crtc_state->hw.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] hw state readout: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    enableddisabled(crtc_state->hw.active));
	}

	dev_priv->active_pipes = cdclk_state->active_pipes = active_pipes;

	readout_plane_state(dev_priv);

	intel_dpll_readout_hw_state(dev_priv);

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			/* Link the encoder to the crtc it's driving and
			 * let it fill in its part of the crtc state. */
			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			    encoder->base.base.id, encoder->base.name,
			    enableddisabled(encoder->base.crtc),
			    pipe_name(pipe));
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = intel_attached_encoder(connector);
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
			    connector->base.base.id, connector->base.name,
			    enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive the remaining software state from what was read out. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			struct drm_display_mode *mode = &crtc_state->hw.mode;

			intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
						    crtc_state);

			*mode = crtc_state->hw.adjusted_mode;
			mode->hdisplay = crtc_state->pipe_src_w;
			mode->vdisplay = crtc_state->pipe_src_h;

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			mode->private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide ||
				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
				    plane->base.base.id, plane->base.name,
				    crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (drm_WARN_ON(dev, min_cdclk < 0))
				min_cdclk = 0;
		}

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
		cdclk_state->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
18378
18379 static void
18380 get_encoder_power_domains(struct drm_i915_private *dev_priv)
18381 {
18382         struct intel_encoder *encoder;
18383
18384         for_each_intel_encoder(&dev_priv->drm, encoder) {
18385                 struct intel_crtc_state *crtc_state;
18386
18387                 if (!encoder->get_power_domains)
18388                         continue;
18389
18390                 /*
18391                  * MST-primary and inactive encoders don't have a crtc state
18392                  * and neither of these require any power domain references.
18393                  */
18394                 if (!encoder->base.crtc)
18395                         continue;
18396
18397                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
18398                 encoder->get_power_domains(encoder, crtc_state);
18399         }
18400 }
18401
/*
 * Apply display workarounds that must be in place before we start
 * touching planes/pipes during hardware state takeover.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		intel_de_write(dev_priv, CHICKEN_PAR1_1,
			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}
18421
18422 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
18423                                        enum port port, i915_reg_t hdmi_reg)
18424 {
18425         u32 val = intel_de_read(dev_priv, hdmi_reg);
18426
18427         if (val & SDVO_ENABLE ||
18428             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
18429                 return;
18430
18431         drm_dbg_kms(&dev_priv->drm,
18432                     "Sanitizing transcoder select for HDMI %c\n",
18433                     port_name(port));
18434
18435         val &= ~SDVO_PIPE_SEL_MASK;
18436         val |= SDVO_PIPE_SEL(PIPE_A);
18437
18438         intel_de_write(dev_priv, hdmi_reg, val);
18439 }
18440
18441 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
18442                                      enum port port, i915_reg_t dp_reg)
18443 {
18444         u32 val = intel_de_read(dev_priv, dp_reg);
18445
18446         if (val & DP_PORT_EN ||
18447             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
18448                 return;
18449
18450         drm_dbg_kms(&dev_priv->drm,
18451                     "Sanitizing transcoder select for DP %c\n",
18452                     port_name(port));
18453
18454         val &= ~DP_PIPE_SEL_MASK;
18455         val |= DP_PIPE_SEL(PIPE_A);
18456
18457         intel_de_write(dev_priv, dp_reg, val);
18458 }
18459
/* Sanitize the transcoder select bits on all IBX PCH DP/HDMI ports. */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
18482
/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* Keep the display powered for the whole readout + sanitize. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	/* Encoders first, then crtcs: crtc sanitization may disable a
	 * pipe whose encoders were just cleaned up. */
	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Read out (and sanitize, where supported) watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		/* No crtc should be holding extra power domains at this
		 * point; warn and drop them if any remain. */
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
18572
18573 void intel_display_resume(struct drm_device *dev)
18574 {
18575         struct drm_i915_private *dev_priv = to_i915(dev);
18576         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
18577         struct drm_modeset_acquire_ctx ctx;
18578         int ret;
18579
18580         dev_priv->modeset_restore_state = NULL;
18581         if (state)
18582                 state->acquire_ctx = &ctx;
18583
18584         drm_modeset_acquire_init(&ctx, 0);
18585
18586         while (1) {
18587                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
18588                 if (ret != -EDEADLK)
18589                         break;
18590
18591                 drm_modeset_backoff(&ctx);
18592         }
18593
18594         if (!ret)
18595                 ret = __intel_display_resume(dev, state, &ctx);
18596
18597         intel_enable_ipc(dev_priv);
18598         drm_modeset_drop_locks(&ctx);
18599         drm_modeset_acquire_fini(&ctx);
18600
18601         if (ret)
18602                 drm_err(&dev_priv->drm,
18603                         "Restoring old state failed with %i\n", ret);
18604         if (state)
18605                 drm_atomic_state_put(state);
18606 }
18607
18608 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
18609 {
18610         struct intel_connector *connector;
18611         struct drm_connector_list_iter conn_iter;
18612
18613         /* Kill all the work that may have been queued by hpd. */
18614         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
18615         for_each_intel_connector_iter(connector, &conn_iter) {
18616                 if (connector->modeset_retry_work.func)
18617                         cancel_work_sync(&connector->modeset_retry_work);
18618                 if (connector->hdcp.shim) {
18619                         cancel_delayed_work_sync(&connector->hdcp.check_work);
18620                         cancel_work_sync(&connector->hdcp.prop_work);
18621                 }
18622         }
18623         drm_connector_list_iter_end(&conn_iter);
18624 }
18625
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Drain pending flip and modeset work while irqs are still alive. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/*
	 * The free-list worker must have run to completion; any entry left
	 * on the list at this point would be leaked, hence the WARN.
	 */
	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
18635
/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	/*
	 * NOTE: the ordering of the teardown steps below matters; several
	 * of the comments in this function call out explicit dependencies.
	 */

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* Workqueues were flushed in part #1; now they can be destroyed. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	/* Release the stolen-memory compressed framebuffer used by FBC. */
	intel_fbc_cleanup_cfb(i915);
}
18675
18676 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
18677
/*
 * Snapshot of display hardware registers taken at GPU error-capture
 * time, later formatted by intel_display_print_error_state().
 */
struct intel_display_error_state {

	/* HSW/BDW only: HSW_PWR_WELL_CTL2 */
	u32 power_well_driver;

	/* Legacy cursor plane registers, one entry per pipe. */
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* False if the pipe's power domain was off at capture time;
		 * the remaining fields are then not read. */
		bool power_domain_on;
		u32 source;
		u32 stat;	/* GMCH platforms only */
	} pipe[I915_MAX_PIPES];

	/* Primary (display) plane registers, one entry per pipe.
	 * Which fields are populated depends on the hardware generation. */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;	/* gen <= 3 */
		u32 pos;	/* gen <= 3 */
		u32 addr;	/* gen <= 7, not HSW */
		u32 surface;	/* gen >= 4 */
		u32 tile_offset;	/* gen >= 4 */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		/* Transcoder exists on this platform. */
		bool available;
		/* Power domain was enabled, so registers below are valid. */
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	/* Size must match the transcoders[] table in
	 * intel_display_capture_error_state() (BUILD_BUG_ON enforced). */
	} transcoder[5];
};
18720
18721 struct intel_display_error_state *
18722 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
18723 {
18724         struct intel_display_error_state *error;
18725         int transcoders[] = {
18726                 TRANSCODER_A,
18727                 TRANSCODER_B,
18728                 TRANSCODER_C,
18729                 TRANSCODER_D,
18730                 TRANSCODER_EDP,
18731         };
18732         int i;
18733
18734         BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
18735
18736         if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
18737                 return NULL;
18738
18739         error = kzalloc(sizeof(*error), GFP_ATOMIC);
18740         if (error == NULL)
18741                 return NULL;
18742
18743         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
18744                 error->power_well_driver = intel_de_read(dev_priv,
18745                                                          HSW_PWR_WELL_CTL2);
18746
18747         for_each_pipe(dev_priv, i) {
18748                 error->pipe[i].power_domain_on =
18749                         __intel_display_power_is_enabled(dev_priv,
18750                                                          POWER_DOMAIN_PIPE(i));
18751                 if (!error->pipe[i].power_domain_on)
18752                         continue;
18753
18754                 error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
18755                 error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
18756                 error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));
18757
18758                 error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
18759                 error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
18760                 if (INTEL_GEN(dev_priv) <= 3) {
18761                         error->plane[i].size = intel_de_read(dev_priv,
18762                                                              DSPSIZE(i));
18763                         error->plane[i].pos = intel_de_read(dev_priv,
18764                                                             DSPPOS(i));
18765                 }
18766                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
18767                         error->plane[i].addr = intel_de_read(dev_priv,
18768                                                              DSPADDR(i));
18769                 if (INTEL_GEN(dev_priv) >= 4) {
18770                         error->plane[i].surface = intel_de_read(dev_priv,
18771                                                                 DSPSURF(i));
18772                         error->plane[i].tile_offset = intel_de_read(dev_priv,
18773                                                                     DSPTILEOFF(i));
18774                 }
18775
18776                 error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));
18777
18778                 if (HAS_GMCH(dev_priv))
18779                         error->pipe[i].stat = intel_de_read(dev_priv,
18780                                                             PIPESTAT(i));
18781         }
18782
18783         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
18784                 enum transcoder cpu_transcoder = transcoders[i];
18785
18786                 if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
18787                         continue;
18788
18789                 error->transcoder[i].available = true;
18790                 error->transcoder[i].power_domain_on =
18791                         __intel_display_power_is_enabled(dev_priv,
18792                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
18793                 if (!error->transcoder[i].power_domain_on)
18794                         continue;
18795
18796                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
18797
18798                 error->transcoder[i].conf = intel_de_read(dev_priv,
18799                                                           PIPECONF(cpu_transcoder));
18800                 error->transcoder[i].htotal = intel_de_read(dev_priv,
18801                                                             HTOTAL(cpu_transcoder));
18802                 error->transcoder[i].hblank = intel_de_read(dev_priv,
18803                                                             HBLANK(cpu_transcoder));
18804                 error->transcoder[i].hsync = intel_de_read(dev_priv,
18805                                                            HSYNC(cpu_transcoder));
18806                 error->transcoder[i].vtotal = intel_de_read(dev_priv,
18807                                                             VTOTAL(cpu_transcoder));
18808                 error->transcoder[i].vblank = intel_de_read(dev_priv,
18809                                                             VBLANK(cpu_transcoder));
18810                 error->transcoder[i].vsync = intel_de_read(dev_priv,
18811                                                            VSYNC(cpu_transcoder));
18812         }
18813
18814         return error;
18815 }
18816
18817 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
18818
18819 void
18820 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
18821                                 struct intel_display_error_state *error)
18822 {
18823         struct drm_i915_private *dev_priv = m->i915;
18824         int i;
18825
18826         if (!error)
18827                 return;
18828
18829         err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
18830         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
18831                 err_printf(m, "PWR_WELL_CTL2: %08x\n",
18832                            error->power_well_driver);
18833         for_each_pipe(dev_priv, i) {
18834                 err_printf(m, "Pipe [%d]:\n", i);
18835                 err_printf(m, "  Power: %s\n",
18836                            onoff(error->pipe[i].power_domain_on));
18837                 err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
18838                 err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
18839
18840                 err_printf(m, "Plane [%d]:\n", i);
18841                 err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
18842                 err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
18843                 if (INTEL_GEN(dev_priv) <= 3) {
18844                         err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
18845                         err_printf(m, "  POS: %08x\n", error->plane[i].pos);
18846                 }
18847                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
18848                         err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
18849                 if (INTEL_GEN(dev_priv) >= 4) {
18850                         err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
18851                         err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
18852                 }
18853
18854                 err_printf(m, "Cursor [%d]:\n", i);
18855                 err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
18856                 err_printf(m, "  POS: %08x\n", error->cursor[i].position);
18857                 err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
18858         }
18859
18860         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
18861                 if (!error->transcoder[i].available)
18862                         continue;
18863
18864                 err_printf(m, "CPU transcoder: %s\n",
18865                            transcoder_name(error->transcoder[i].cpu_transcoder));
18866                 err_printf(m, "  Power: %s\n",
18867                            onoff(error->transcoder[i].power_domain_on));
18868                 err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
18869                 err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
18870                 err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
18871                 err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
18872                 err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
18873                 err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
18874                 err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
18875         }
18876 }
18877
18878 #endif