drm/fourcc: Fix conflicting Y41x definitions
[sfrench/cifs-2.6.git] / drivers / gpu / drm / i915 / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/reservation.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_frontbuffer.h"
62
63 /* Primary plane formats for gen <= 3 */
64 static const u32 i8xx_primary_formats[] = {
65         DRM_FORMAT_C8,
66         DRM_FORMAT_RGB565,
67         DRM_FORMAT_XRGB1555,
68         DRM_FORMAT_XRGB8888,
69 };
70
71 /* Primary plane formats for gen >= 4 */
72 static const u32 i965_primary_formats[] = {
73         DRM_FORMAT_C8,
74         DRM_FORMAT_RGB565,
75         DRM_FORMAT_XRGB8888,
76         DRM_FORMAT_XBGR8888,
77         DRM_FORMAT_XRGB2101010,
78         DRM_FORMAT_XBGR2101010,
79 };
80
81 static const u64 i9xx_format_modifiers[] = {
82         I915_FORMAT_MOD_X_TILED,
83         DRM_FORMAT_MOD_LINEAR,
84         DRM_FORMAT_MOD_INVALID
85 };
86
87 /* Cursor formats */
88 static const u32 intel_cursor_formats[] = {
89         DRM_FORMAT_ARGB8888,
90 };
91
92 static const u64 cursor_format_modifiers[] = {
93         DRM_FORMAT_MOD_LINEAR,
94         DRM_FORMAT_MOD_INVALID
95 };
96
97 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
98                                 struct intel_crtc_state *pipe_config);
99 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
100                                    struct intel_crtc_state *pipe_config);
101
102 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
103                                   struct drm_i915_gem_object *obj,
104                                   struct drm_mode_fb_cmd2 *mode_cmd);
105 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
106 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
107 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
108                                          const struct intel_link_m_n *m_n,
109                                          const struct intel_link_m_n *m2_n2);
110 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
111 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
112 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
113 static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
114 static void vlv_prepare_pll(struct intel_crtc *crtc,
115                             const struct intel_crtc_state *pipe_config);
116 static void chv_prepare_pll(struct intel_crtc *crtc,
117                             const struct intel_crtc_state *pipe_config);
118 static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
119 static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
120 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
121                                     struct intel_crtc_state *crtc_state);
122 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
123 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
124 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
125 static void intel_modeset_setup_hw_state(struct drm_device *dev,
126                                          struct drm_modeset_acquire_ctx *ctx);
127 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
128
129 struct intel_limit {
130         struct {
131                 int min, max;
132         } dot, vco, n, m, m1, m2, p, p1;
133
134         struct {
135                 int dot_limit;
136                 int p2_slow, p2_fast;
137         } p2;
138 };
139
140 /* returns HPLL frequency in kHz */
141 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
142 {
143         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
144
145         /* Obtain SKU information */
146         mutex_lock(&dev_priv->sb_lock);
147         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
148                 CCK_FUSE_HPLL_FREQ_MASK;
149         mutex_unlock(&dev_priv->sb_lock);
150
151         return vco_freq[hpll_freq] * 1000;
152 }
153
154 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
155                       const char *name, u32 reg, int ref_freq)
156 {
157         u32 val;
158         int divider;
159
160         mutex_lock(&dev_priv->sb_lock);
161         val = vlv_cck_read(dev_priv, reg);
162         mutex_unlock(&dev_priv->sb_lock);
163
164         divider = val & CCK_FREQUENCY_VALUES;
165
166         WARN((val & CCK_FREQUENCY_STATUS) !=
167              (divider << CCK_FREQUENCY_STATUS_SHIFT),
168              "%s change in progress\n", name);
169
170         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
171 }
172
173 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
174                            const char *name, u32 reg)
175 {
176         if (dev_priv->hpll_freq == 0)
177                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
178
179         return vlv_get_cck_clock(dev_priv, name, reg,
180                                  dev_priv->hpll_freq);
181 }
182
183 static void intel_update_czclk(struct drm_i915_private *dev_priv)
184 {
185         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
186                 return;
187
188         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
189                                                       CCK_CZ_CLOCK_CONTROL);
190
191         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
192 }
193
194 static inline u32 /* units of 100MHz */
195 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
196                     const struct intel_crtc_state *pipe_config)
197 {
198         if (HAS_DDI(dev_priv))
199                 return pipe_config->port_clock; /* SPLL */
200         else
201                 return dev_priv->fdi_pll_freq;
202 }
203
204 static const struct intel_limit intel_limits_i8xx_dac = {
205         .dot = { .min = 25000, .max = 350000 },
206         .vco = { .min = 908000, .max = 1512000 },
207         .n = { .min = 2, .max = 16 },
208         .m = { .min = 96, .max = 140 },
209         .m1 = { .min = 18, .max = 26 },
210         .m2 = { .min = 6, .max = 16 },
211         .p = { .min = 4, .max = 128 },
212         .p1 = { .min = 2, .max = 33 },
213         .p2 = { .dot_limit = 165000,
214                 .p2_slow = 4, .p2_fast = 2 },
215 };
216
217 static const struct intel_limit intel_limits_i8xx_dvo = {
218         .dot = { .min = 25000, .max = 350000 },
219         .vco = { .min = 908000, .max = 1512000 },
220         .n = { .min = 2, .max = 16 },
221         .m = { .min = 96, .max = 140 },
222         .m1 = { .min = 18, .max = 26 },
223         .m2 = { .min = 6, .max = 16 },
224         .p = { .min = 4, .max = 128 },
225         .p1 = { .min = 2, .max = 33 },
226         .p2 = { .dot_limit = 165000,
227                 .p2_slow = 4, .p2_fast = 4 },
228 };
229
230 static const struct intel_limit intel_limits_i8xx_lvds = {
231         .dot = { .min = 25000, .max = 350000 },
232         .vco = { .min = 908000, .max = 1512000 },
233         .n = { .min = 2, .max = 16 },
234         .m = { .min = 96, .max = 140 },
235         .m1 = { .min = 18, .max = 26 },
236         .m2 = { .min = 6, .max = 16 },
237         .p = { .min = 4, .max = 128 },
238         .p1 = { .min = 1, .max = 6 },
239         .p2 = { .dot_limit = 165000,
240                 .p2_slow = 14, .p2_fast = 7 },
241 };
242
243 static const struct intel_limit intel_limits_i9xx_sdvo = {
244         .dot = { .min = 20000, .max = 400000 },
245         .vco = { .min = 1400000, .max = 2800000 },
246         .n = { .min = 1, .max = 6 },
247         .m = { .min = 70, .max = 120 },
248         .m1 = { .min = 8, .max = 18 },
249         .m2 = { .min = 3, .max = 7 },
250         .p = { .min = 5, .max = 80 },
251         .p1 = { .min = 1, .max = 8 },
252         .p2 = { .dot_limit = 200000,
253                 .p2_slow = 10, .p2_fast = 5 },
254 };
255
256 static const struct intel_limit intel_limits_i9xx_lvds = {
257         .dot = { .min = 20000, .max = 400000 },
258         .vco = { .min = 1400000, .max = 2800000 },
259         .n = { .min = 1, .max = 6 },
260         .m = { .min = 70, .max = 120 },
261         .m1 = { .min = 8, .max = 18 },
262         .m2 = { .min = 3, .max = 7 },
263         .p = { .min = 7, .max = 98 },
264         .p1 = { .min = 1, .max = 8 },
265         .p2 = { .dot_limit = 112000,
266                 .p2_slow = 14, .p2_fast = 7 },
267 };
268
269
270 static const struct intel_limit intel_limits_g4x_sdvo = {
271         .dot = { .min = 25000, .max = 270000 },
272         .vco = { .min = 1750000, .max = 3500000},
273         .n = { .min = 1, .max = 4 },
274         .m = { .min = 104, .max = 138 },
275         .m1 = { .min = 17, .max = 23 },
276         .m2 = { .min = 5, .max = 11 },
277         .p = { .min = 10, .max = 30 },
278         .p1 = { .min = 1, .max = 3},
279         .p2 = { .dot_limit = 270000,
280                 .p2_slow = 10,
281                 .p2_fast = 10
282         },
283 };
284
285 static const struct intel_limit intel_limits_g4x_hdmi = {
286         .dot = { .min = 22000, .max = 400000 },
287         .vco = { .min = 1750000, .max = 3500000},
288         .n = { .min = 1, .max = 4 },
289         .m = { .min = 104, .max = 138 },
290         .m1 = { .min = 16, .max = 23 },
291         .m2 = { .min = 5, .max = 11 },
292         .p = { .min = 5, .max = 80 },
293         .p1 = { .min = 1, .max = 8},
294         .p2 = { .dot_limit = 165000,
295                 .p2_slow = 10, .p2_fast = 5 },
296 };
297
298 static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
299         .dot = { .min = 20000, .max = 115000 },
300         .vco = { .min = 1750000, .max = 3500000 },
301         .n = { .min = 1, .max = 3 },
302         .m = { .min = 104, .max = 138 },
303         .m1 = { .min = 17, .max = 23 },
304         .m2 = { .min = 5, .max = 11 },
305         .p = { .min = 28, .max = 112 },
306         .p1 = { .min = 2, .max = 8 },
307         .p2 = { .dot_limit = 0,
308                 .p2_slow = 14, .p2_fast = 14
309         },
310 };
311
312 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
313         .dot = { .min = 80000, .max = 224000 },
314         .vco = { .min = 1750000, .max = 3500000 },
315         .n = { .min = 1, .max = 3 },
316         .m = { .min = 104, .max = 138 },
317         .m1 = { .min = 17, .max = 23 },
318         .m2 = { .min = 5, .max = 11 },
319         .p = { .min = 14, .max = 42 },
320         .p1 = { .min = 2, .max = 6 },
321         .p2 = { .dot_limit = 0,
322                 .p2_slow = 7, .p2_fast = 7
323         },
324 };
325
326 static const struct intel_limit intel_limits_pineview_sdvo = {
327         .dot = { .min = 20000, .max = 400000},
328         .vco = { .min = 1700000, .max = 3500000 },
329         /* Pineview's Ncounter is a ring counter */
330         .n = { .min = 3, .max = 6 },
331         .m = { .min = 2, .max = 256 },
332         /* Pineview only has one combined m divider, which we treat as m2. */
333         .m1 = { .min = 0, .max = 0 },
334         .m2 = { .min = 0, .max = 254 },
335         .p = { .min = 5, .max = 80 },
336         .p1 = { .min = 1, .max = 8 },
337         .p2 = { .dot_limit = 200000,
338                 .p2_slow = 10, .p2_fast = 5 },
339 };
340
341 static const struct intel_limit intel_limits_pineview_lvds = {
342         .dot = { .min = 20000, .max = 400000 },
343         .vco = { .min = 1700000, .max = 3500000 },
344         .n = { .min = 3, .max = 6 },
345         .m = { .min = 2, .max = 256 },
346         .m1 = { .min = 0, .max = 0 },
347         .m2 = { .min = 0, .max = 254 },
348         .p = { .min = 7, .max = 112 },
349         .p1 = { .min = 1, .max = 8 },
350         .p2 = { .dot_limit = 112000,
351                 .p2_slow = 14, .p2_fast = 14 },
352 };
353
354 /* Ironlake / Sandybridge
355  *
356  * We calculate clock using (register_value + 2) for N/M1/M2, so here
357  * the range value for them is (actual_value - 2).
358  */
359 static const struct intel_limit intel_limits_ironlake_dac = {
360         .dot = { .min = 25000, .max = 350000 },
361         .vco = { .min = 1760000, .max = 3510000 },
362         .n = { .min = 1, .max = 5 },
363         .m = { .min = 79, .max = 127 },
364         .m1 = { .min = 12, .max = 22 },
365         .m2 = { .min = 5, .max = 9 },
366         .p = { .min = 5, .max = 80 },
367         .p1 = { .min = 1, .max = 8 },
368         .p2 = { .dot_limit = 225000,
369                 .p2_slow = 10, .p2_fast = 5 },
370 };
371
372 static const struct intel_limit intel_limits_ironlake_single_lvds = {
373         .dot = { .min = 25000, .max = 350000 },
374         .vco = { .min = 1760000, .max = 3510000 },
375         .n = { .min = 1, .max = 3 },
376         .m = { .min = 79, .max = 118 },
377         .m1 = { .min = 12, .max = 22 },
378         .m2 = { .min = 5, .max = 9 },
379         .p = { .min = 28, .max = 112 },
380         .p1 = { .min = 2, .max = 8 },
381         .p2 = { .dot_limit = 225000,
382                 .p2_slow = 14, .p2_fast = 14 },
383 };
384
385 static const struct intel_limit intel_limits_ironlake_dual_lvds = {
386         .dot = { .min = 25000, .max = 350000 },
387         .vco = { .min = 1760000, .max = 3510000 },
388         .n = { .min = 1, .max = 3 },
389         .m = { .min = 79, .max = 127 },
390         .m1 = { .min = 12, .max = 22 },
391         .m2 = { .min = 5, .max = 9 },
392         .p = { .min = 14, .max = 56 },
393         .p1 = { .min = 2, .max = 8 },
394         .p2 = { .dot_limit = 225000,
395                 .p2_slow = 7, .p2_fast = 7 },
396 };
397
398 /* LVDS 100mhz refclk limits. */
399 static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
400         .dot = { .min = 25000, .max = 350000 },
401         .vco = { .min = 1760000, .max = 3510000 },
402         .n = { .min = 1, .max = 2 },
403         .m = { .min = 79, .max = 126 },
404         .m1 = { .min = 12, .max = 22 },
405         .m2 = { .min = 5, .max = 9 },
406         .p = { .min = 28, .max = 112 },
407         .p1 = { .min = 2, .max = 8 },
408         .p2 = { .dot_limit = 225000,
409                 .p2_slow = 14, .p2_fast = 14 },
410 };
411
412 static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
413         .dot = { .min = 25000, .max = 350000 },
414         .vco = { .min = 1760000, .max = 3510000 },
415         .n = { .min = 1, .max = 3 },
416         .m = { .min = 79, .max = 126 },
417         .m1 = { .min = 12, .max = 22 },
418         .m2 = { .min = 5, .max = 9 },
419         .p = { .min = 14, .max = 42 },
420         .p1 = { .min = 2, .max = 6 },
421         .p2 = { .dot_limit = 225000,
422                 .p2_slow = 7, .p2_fast = 7 },
423 };
424
425 static const struct intel_limit intel_limits_vlv = {
426          /*
427           * These are the data rate limits (measured in fast clocks)
428           * since those are the strictest limits we have. The fast
429           * clock and actual rate limits are more relaxed, so checking
430           * them would make no difference.
431           */
432         .dot = { .min = 25000 * 5, .max = 270000 * 5 },
433         .vco = { .min = 4000000, .max = 6000000 },
434         .n = { .min = 1, .max = 7 },
435         .m1 = { .min = 2, .max = 3 },
436         .m2 = { .min = 11, .max = 156 },
437         .p1 = { .min = 2, .max = 3 },
438         .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
439 };
440
441 static const struct intel_limit intel_limits_chv = {
442         /*
443          * These are the data rate limits (measured in fast clocks)
444          * since those are the strictest limits we have.  The fast
445          * clock and actual rate limits are more relaxed, so checking
446          * them would make no difference.
447          */
448         .dot = { .min = 25000 * 5, .max = 540000 * 5},
449         .vco = { .min = 4800000, .max = 6480000 },
450         .n = { .min = 1, .max = 1 },
451         .m1 = { .min = 2, .max = 2 },
452         .m2 = { .min = 24 << 22, .max = 175 << 22 },
453         .p1 = { .min = 2, .max = 4 },
454         .p2 = { .p2_slow = 1, .p2_fast = 14 },
455 };
456
457 static const struct intel_limit intel_limits_bxt = {
458         /* FIXME: find real dot limits */
459         .dot = { .min = 0, .max = INT_MAX },
460         .vco = { .min = 4800000, .max = 6700000 },
461         .n = { .min = 1, .max = 1 },
462         .m1 = { .min = 2, .max = 2 },
463         /* FIXME: find real m2 limits */
464         .m2 = { .min = 2 << 22, .max = 255 << 22 },
465         .p1 = { .min = 2, .max = 4 },
466         .p2 = { .p2_slow = 1, .p2_fast = 20 },
467 };
468
469 static void
470 skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
471 {
472         if (enable)
473                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
474                            DUPS1_GATING_DIS | DUPS2_GATING_DIS);
475         else
476                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
477                            I915_READ(CLKGATE_DIS_PSL(pipe)) &
478                            ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
479 }
480
481 static bool
482 needs_modeset(const struct drm_crtc_state *state)
483 {
484         return drm_atomic_crtc_needs_modeset(state);
485 }
486
487 /*
488  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
489  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
490  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
491  * The helpers' return value is the rate of the clock that is fed to the
492  * display engine's pipe which can be the above fast dot clock rate or a
493  * divided-down version of it.
494  */
495 /* m1 is reserved as 0 in Pineview, n is a ring counter */
496 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
497 {
498         clock->m = clock->m2 + 2;
499         clock->p = clock->p1 * clock->p2;
500         if (WARN_ON(clock->n == 0 || clock->p == 0))
501                 return 0;
502         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
503         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
504
505         return clock->dot;
506 }
507
508 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
509 {
510         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
511 }
512
513 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
514 {
515         clock->m = i9xx_dpll_compute_m(clock);
516         clock->p = clock->p1 * clock->p2;
517         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
518                 return 0;
519         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
520         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
521
522         return clock->dot;
523 }
524
525 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
526 {
527         clock->m = clock->m1 * clock->m2;
528         clock->p = clock->p1 * clock->p2;
529         if (WARN_ON(clock->n == 0 || clock->p == 0))
530                 return 0;
531         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
532         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
533
534         return clock->dot / 5;
535 }
536
537 int chv_calc_dpll_params(int refclk, struct dpll *clock)
538 {
539         clock->m = clock->m1 * clock->m2;
540         clock->p = clock->p1 * clock->p2;
541         if (WARN_ON(clock->n == 0 || clock->p == 0))
542                 return 0;
543         clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m,
544                                            clock->n << 22);
545         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
546
547         return clock->dot / 5;
548 }
549
550 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
551
552 /*
553  * Returns whether the given set of divisors are valid for a given refclk with
554  * the given connectors.
555  */
556 static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
557                                const struct intel_limit *limit,
558                                const struct dpll *clock)
559 {
560         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
561                 INTELPllInvalid("n out of range\n");
562         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
563                 INTELPllInvalid("p1 out of range\n");
564         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
565                 INTELPllInvalid("m2 out of range\n");
566         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
567                 INTELPllInvalid("m1 out of range\n");
568
569         if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
570             !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
571                 if (clock->m1 <= clock->m2)
572                         INTELPllInvalid("m1 <= m2\n");
573
574         if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
575             !IS_GEN9_LP(dev_priv)) {
576                 if (clock->p < limit->p.min || limit->p.max < clock->p)
577                         INTELPllInvalid("p out of range\n");
578                 if (clock->m < limit->m.min || limit->m.max < clock->m)
579                         INTELPllInvalid("m out of range\n");
580         }
581
582         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
583                 INTELPllInvalid("vco out of range\n");
584         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
585          * connector, etc., rather than just a single range.
586          */
587         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
588                 INTELPllInvalid("dot out of range\n");
589
590         return true;
591 }
592
593 static int
594 i9xx_select_p2_div(const struct intel_limit *limit,
595                    const struct intel_crtc_state *crtc_state,
596                    int target)
597 {
598         struct drm_device *dev = crtc_state->base.crtc->dev;
599
600         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
601                 /*
602                  * For LVDS just rely on its current settings for dual-channel.
603                  * We haven't figured out how to reliably set up different
604                  * single/dual channel state, if we even can.
605                  */
606                 if (intel_is_dual_link_lvds(dev))
607                         return limit->p2.p2_fast;
608                 else
609                         return limit->p2.p2_slow;
610         } else {
611                 if (target < limit->p2.dot_limit)
612                         return limit->p2.p2_slow;
613                 else
614                         return limit->p2.p2_fast;
615         }
616 }
617
618 /*
619  * Returns a set of divisors for the desired target clock with the given
620  * refclk, or FALSE.  The returned values represent the clock equation:
621  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
622  *
623  * Target and reference clocks are specified in kHz.
624  *
625  * If match_clock is provided, then best_clock P divider must match the P
626  * divider from @match_clock used for LVDS downclocking.
627  */
628 static bool
629 i9xx_find_best_dpll(const struct intel_limit *limit,
630                     struct intel_crtc_state *crtc_state,
631                     int target, int refclk, struct dpll *match_clock,
632                     struct dpll *best_clock)
633 {
634         struct drm_device *dev = crtc_state->base.crtc->dev;
635         struct dpll clock;
636         int err = target;
637
638         memset(best_clock, 0, sizeof(*best_clock));
639
640         clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
641
642         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
643              clock.m1++) {
644                 for (clock.m2 = limit->m2.min;
645                      clock.m2 <= limit->m2.max; clock.m2++) {
646                         if (clock.m2 >= clock.m1)
647                                 break;
648                         for (clock.n = limit->n.min;
649                              clock.n <= limit->n.max; clock.n++) {
650                                 for (clock.p1 = limit->p1.min;
651                                         clock.p1 <= limit->p1.max; clock.p1++) {
652                                         int this_err;
653
654                                         i9xx_calc_dpll_params(refclk, &clock);
655                                         if (!intel_PLL_is_valid(to_i915(dev),
656                                                                 limit,
657                                                                 &clock))
658                                                 continue;
659                                         if (match_clock &&
660                                             clock.p != match_clock->p)
661                                                 continue;
662
663                                         this_err = abs(clock.dot - target);
664                                         if (this_err < err) {
665                                                 *best_clock = clock;
666                                                 err = this_err;
667                                         }
668                                 }
669                         }
670                 }
671         }
672
673         return (err != target);
674 }
675
676 /*
677  * Returns a set of divisors for the desired target clock with the given
678  * refclk, or FALSE.  The returned values represent the clock equation:
679  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
680  *
681  * Target and reference clocks are specified in kHz.
682  *
683  * If match_clock is provided, then best_clock P divider must match the P
684  * divider from @match_clock used for LVDS downclocking.
685  */
686 static bool
687 pnv_find_best_dpll(const struct intel_limit *limit,
688                    struct intel_crtc_state *crtc_state,
689                    int target, int refclk, struct dpll *match_clock,
690                    struct dpll *best_clock)
691 {
692         struct drm_device *dev = crtc_state->base.crtc->dev;
693         struct dpll clock;
694         int err = target;
695
696         memset(best_clock, 0, sizeof(*best_clock));
697
698         clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
699
700         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
701              clock.m1++) {
702                 for (clock.m2 = limit->m2.min;
703                      clock.m2 <= limit->m2.max; clock.m2++) {
704                         for (clock.n = limit->n.min;
705                              clock.n <= limit->n.max; clock.n++) {
706                                 for (clock.p1 = limit->p1.min;
707                                         clock.p1 <= limit->p1.max; clock.p1++) {
708                                         int this_err;
709
710                                         pnv_calc_dpll_params(refclk, &clock);
711                                         if (!intel_PLL_is_valid(to_i915(dev),
712                                                                 limit,
713                                                                 &clock))
714                                                 continue;
715                                         if (match_clock &&
716                                             clock.p != match_clock->p)
717                                                 continue;
718
719                                         this_err = abs(clock.dot - target);
720                                         if (this_err < err) {
721                                                 *best_clock = clock;
722                                                 err = this_err;
723                                         }
724                                 }
725                         }
726                 }
727         }
728
729         return (err != target);
730 }
731
732 /*
733  * Returns a set of divisors for the desired target clock with the given
734  * refclk, or FALSE.  The returned values represent the clock equation:
735  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
736  *
737  * Target and reference clocks are specified in kHz.
738  *
739  * If match_clock is provided, then best_clock P divider must match the P
740  * divider from @match_clock used for LVDS downclocking.
741  */
742 static bool
743 g4x_find_best_dpll(const struct intel_limit *limit,
744                    struct intel_crtc_state *crtc_state,
745                    int target, int refclk, struct dpll *match_clock,
746                    struct dpll *best_clock)
747 {
748         struct drm_device *dev = crtc_state->base.crtc->dev;
749         struct dpll clock;
750         int max_n;
751         bool found = false;
752         /* approximately equals target * 0.00585 */
753         int err_most = (target >> 8) + (target >> 9);
754
755         memset(best_clock, 0, sizeof(*best_clock));
756
757         clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
758
759         max_n = limit->n.max;
760         /* based on hardware requirement, prefer smaller n to precision */
761         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
762                 /* based on hardware requirement, prefere larger m1,m2 */
763                 for (clock.m1 = limit->m1.max;
764                      clock.m1 >= limit->m1.min; clock.m1--) {
765                         for (clock.m2 = limit->m2.max;
766                              clock.m2 >= limit->m2.min; clock.m2--) {
767                                 for (clock.p1 = limit->p1.max;
768                                      clock.p1 >= limit->p1.min; clock.p1--) {
769                                         int this_err;
770
771                                         i9xx_calc_dpll_params(refclk, &clock);
772                                         if (!intel_PLL_is_valid(to_i915(dev),
773                                                                 limit,
774                                                                 &clock))
775                                                 continue;
776
777                                         this_err = abs(clock.dot - target);
778                                         if (this_err < err_most) {
779                                                 *best_clock = clock;
780                                                 err_most = this_err;
781                                                 max_n = clock.n;
782                                                 found = true;
783                                         }
784                                 }
785                         }
786                 }
787         }
788         return found;
789 }
790
791 /*
792  * Check if the calculated PLL configuration is more optimal compared to the
793  * best configuration and error found so far. Return the calculated error.
794  */
795 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
796                                const struct dpll *calculated_clock,
797                                const struct dpll *best_clock,
798                                unsigned int best_error_ppm,
799                                unsigned int *error_ppm)
800 {
801         /*
802          * For CHV ignore the error and consider only the P value.
803          * Prefer a bigger P value based on HW requirements.
804          */
805         if (IS_CHERRYVIEW(to_i915(dev))) {
806                 *error_ppm = 0;
807
808                 return calculated_clock->p > best_clock->p;
809         }
810
811         if (WARN_ON_ONCE(!target_freq))
812                 return false;
813
814         *error_ppm = div_u64(1000000ULL *
815                                 abs(target_freq - calculated_clock->dot),
816                              target_freq);
817         /*
818          * Prefer a better P value over a better (smaller) error if the error
819          * is small. Ensure this preference for future configurations too by
820          * setting the error to 0.
821          */
822         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
823                 *error_ppm = 0;
824
825                 return true;
826         }
827
828         return *error_ppm + 10 < best_error_ppm;
829 }
830
831 /*
832  * Returns a set of divisors for the desired target clock with the given
833  * refclk, or FALSE.  The returned values represent the clock equation:
834  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
835  */
836 static bool
837 vlv_find_best_dpll(const struct intel_limit *limit,
838                    struct intel_crtc_state *crtc_state,
839                    int target, int refclk, struct dpll *match_clock,
840                    struct dpll *best_clock)
841 {
842         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
843         struct drm_device *dev = crtc->base.dev;
844         struct dpll clock;
845         unsigned int bestppm = 1000000;
846         /* min update 19.2 MHz */
847         int max_n = min(limit->n.max, refclk / 19200);
848         bool found = false;
849
850         target *= 5; /* fast clock */
851
852         memset(best_clock, 0, sizeof(*best_clock));
853
854         /* based on hardware requirement, prefer smaller n to precision */
855         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
856                 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
857                         for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
858                              clock.p2 -= clock.p2 > 10 ? 2 : 1) {
859                                 clock.p = clock.p1 * clock.p2;
860                                 /* based on hardware requirement, prefer bigger m1,m2 values */
861                                 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
862                                         unsigned int ppm;
863
864                                         clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
865                                                                      refclk * clock.m1);
866
867                                         vlv_calc_dpll_params(refclk, &clock);
868
869                                         if (!intel_PLL_is_valid(to_i915(dev),
870                                                                 limit,
871                                                                 &clock))
872                                                 continue;
873
874                                         if (!vlv_PLL_is_optimal(dev, target,
875                                                                 &clock,
876                                                                 best_clock,
877                                                                 bestppm, &ppm))
878                                                 continue;
879
880                                         *best_clock = clock;
881                                         bestppm = ppm;
882                                         found = true;
883                                 }
884                         }
885                 }
886         }
887
888         return found;
889 }
890
891 /*
892  * Returns a set of divisors for the desired target clock with the given
893  * refclk, or FALSE.  The returned values represent the clock equation:
894  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
895  */
896 static bool
897 chv_find_best_dpll(const struct intel_limit *limit,
898                    struct intel_crtc_state *crtc_state,
899                    int target, int refclk, struct dpll *match_clock,
900                    struct dpll *best_clock)
901 {
902         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
903         struct drm_device *dev = crtc->base.dev;
904         unsigned int best_error_ppm;
905         struct dpll clock;
906         u64 m2;
907         int found = false;
908
909         memset(best_clock, 0, sizeof(*best_clock));
910         best_error_ppm = 1000000;
911
912         /*
913          * Based on hardware doc, the n always set to 1, and m1 always
914          * set to 2.  If requires to support 200Mhz refclk, we need to
915          * revisit this because n may not 1 anymore.
916          */
917         clock.n = 1, clock.m1 = 2;
918         target *= 5;    /* fast clock */
919
920         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
921                 for (clock.p2 = limit->p2.p2_fast;
922                                 clock.p2 >= limit->p2.p2_slow;
923                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
924                         unsigned int error_ppm;
925
926                         clock.p = clock.p1 * clock.p2;
927
928                         m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p *
929                                         clock.n) << 22, refclk * clock.m1);
930
931                         if (m2 > INT_MAX/clock.m1)
932                                 continue;
933
934                         clock.m2 = m2;
935
936                         chv_calc_dpll_params(refclk, &clock);
937
938                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
939                                 continue;
940
941                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
942                                                 best_error_ppm, &error_ppm))
943                                 continue;
944
945                         *best_clock = clock;
946                         best_error_ppm = error_ppm;
947                         found = true;
948                 }
949         }
950
951         return found;
952 }
953
954 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
955                         struct dpll *best_clock)
956 {
957         int refclk = 100000;
958         const struct intel_limit *limit = &intel_limits_bxt;
959
960         return chv_find_best_dpll(limit, crtc_state,
961                                   target_clock, refclk, NULL, best_clock);
962 }
963
964 bool intel_crtc_active(struct intel_crtc *crtc)
965 {
966         /* Be paranoid as we can arrive here with only partial
967          * state retrieved from the hardware during setup.
968          *
969          * We can ditch the adjusted_mode.crtc_clock check as soon
970          * as Haswell has gained clock readout/fastboot support.
971          *
972          * We can ditch the crtc->primary->state->fb check as soon as we can
973          * properly reconstruct framebuffers.
974          *
975          * FIXME: The intel_crtc->active here should be switched to
976          * crtc->state->active once we have proper CRTC states wired up
977          * for atomic.
978          */
979         return crtc->active && crtc->base.primary->state->fb &&
980                 crtc->config->base.adjusted_mode.crtc_clock;
981 }
982
983 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
984                                              enum pipe pipe)
985 {
986         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
987
988         return crtc->config->cpu_transcoder;
989 }
990
/*
 * Sample the pipe's scanline counter (PIPEDSL) twice, 5 ms apart, and
 * report whether it changed in between, i.e. whether the pipe is
 * actively scanning out.
 */
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
                                    enum pipe pipe)
{
        i915_reg_t reg = PIPEDSL(pipe);
        u32 line1, line2;
        u32 line_mask;

        /* gen2 has a narrower scanline counter field */
        if (IS_GEN(dev_priv, 2))
                line_mask = DSL_LINEMASK_GEN2;
        else
                line_mask = DSL_LINEMASK_GEN3;

        line1 = I915_READ(reg) & line_mask;
        msleep(5);
        line2 = I915_READ(reg) & line_mask;

        return line1 != line2;
}
1009
/*
 * Wait (timeout 100) for the pipe's scanline counter to reach the
 * desired activity @state (moving or stopped), logging an error on
 * timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Wait for the display line to settle/start moving */
        if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
                DRM_ERROR("pipe %c scanline %s wait timed out\n",
                          pipe_name(pipe), onoff(state));
}
1020
/* Wait for the pipe's scanline counter to stop moving (pipe shut down). */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, false);
}
1025
/* Wait for the pipe's scanline counter to start moving (pipe running). */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
        wait_for_pipe_scanline_moving(crtc, true);
}
1030
/*
 * Wait for a pipe to be fully disabled: on gen4+ poll the transcoder's
 * PIPECONF for the I965_PIPECONF_ACTIVE bit to clear (100 ms timeout);
 * older hardware has no such status bit, so wait for the scanline
 * counter to stop instead.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        if (INTEL_GEN(dev_priv) >= 4) {
                enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
                i915_reg_t reg = PIPECONF(cpu_transcoder);

                /* Wait for the Pipe State to go off */
                if (intel_wait_for_register(dev_priv,
                                            reg, I965_PIPECONF_ACTIVE, 0,
                                            100))
                        WARN(1, "pipe_off wait timed out\n");
        } else {
                intel_wait_for_pipe_scanline_stopped(crtc);
        }
}
1050
1051 /* Only for pre-ILK configs */
1052 void assert_pll(struct drm_i915_private *dev_priv,
1053                 enum pipe pipe, bool state)
1054 {
1055         u32 val;
1056         bool cur_state;
1057
1058         val = I915_READ(DPLL(pipe));
1059         cur_state = !!(val & DPLL_VCO_ENABLE);
1060         I915_STATE_WARN(cur_state != state,
1061              "PLL state assertion failure (expected %s, current %s)\n",
1062                         onoff(state), onoff(cur_state));
1063 }
1064
/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
        u32 val;
        bool cur_state;

        /* the DSI PLL control register is reached via the CCK sideband,
         * serialized by sb_lock */
        mutex_lock(&dev_priv->sb_lock);
        val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
        mutex_unlock(&dev_priv->sb_lock);

        cur_state = val & DSI_PLL_VCO_EN;
        I915_STATE_WARN(cur_state != state,
             "DSI PLL state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
1080
/*
 * Assert the FDI TX enable state for @pipe. On DDI platforms there is
 * no dedicated FDI_TX register, so the transcoder's DDI function enable
 * bit stands in for it.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        if (HAS_DDI(dev_priv)) {
                /* DDI does not have a specific FDI_TX register */
                u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
                cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
        } else {
                u32 val = I915_READ(FDI_TX_CTL(pipe));
                cur_state = !!(val & FDI_TX_ENABLE);
        }
        I915_STATE_WARN(cur_state != state,
             "FDI TX state assertion failure (expected %s, current %s)\n",
                        onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1102
1103 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1104                           enum pipe pipe, bool state)
1105 {
1106         u32 val;
1107         bool cur_state;
1108
1109         val = I915_READ(FDI_RX_CTL(pipe));
1110         cur_state = !!(val & FDI_RX_ENABLE);
1111         I915_STATE_WARN(cur_state != state,
1112              "FDI RX state assertion failure (expected %s, current %s)\n",
1113                         onoff(state), onoff(cur_state));
1114 }
1115 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1116 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1117
/*
 * Assert that the FDI TX PLL for @pipe is enabled. No-op on ILK (PLL is
 * always on) and on DDI platforms (PLL setup is owned by the DDI port).
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        u32 val;

        /* ILK FDI PLL is always enabled */
        if (IS_GEN(dev_priv, 5))
                return;

        /* On Haswell, DDI ports are responsible for the FDI PLL setup */
        if (HAS_DDI(dev_priv))
                return;

        val = I915_READ(FDI_TX_CTL(pipe));
        I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1134
1135 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1136                        enum pipe pipe, bool state)
1137 {
1138         u32 val;
1139         bool cur_state;
1140
1141         val = I915_READ(FDI_RX_CTL(pipe));
1142         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1143         I915_STATE_WARN(cur_state != state,
1144              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1145                         onoff(state), onoff(cur_state));
1146 }
1147
/*
 * Assert that the panel power sequencer registers for the panel on
 * @pipe are writable: either the panel power is off, or the registers
 * are unlocked. The panel's pipe is derived from the PPS port-select
 * field and the relevant port's enable state.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        i915_reg_t pp_reg;
        u32 val;
        enum pipe panel_pipe = INVALID_PIPE;
        bool locked = true;

        /* DDI platforms are not handled here */
        if (WARN_ON(HAS_DDI(dev_priv)))
                return;

        if (HAS_PCH_SPLIT(dev_priv)) {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                /* find which pipe the selected panel port is driving */
                switch (port_sel) {
                case PANEL_PORT_SELECT_LVDS:
                        intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPA:
                        intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPC:
                        intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
                        break;
                case PANEL_PORT_SELECT_DPD:
                        intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
                        break;
                default:
                        MISSING_CASE(port_sel);
                        break;
                }
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                /* presumably write lock depends on pipe, not port select */
                pp_reg = PP_CONTROL(pipe);
                panel_pipe = pipe;
        } else {
                u32 port_sel;

                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

                /* pre-PCH-split hardware only supports an LVDS panel here */
                WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
                intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
        }

        val = I915_READ(pp_reg);
        if (!(val & PANEL_POWER_ON) ||
            ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
                locked = false;

        I915_STATE_WARN(panel_pipe == pipe && locked,
             "panel assertion failure, pipe %c regs locked\n",
             pipe_name(pipe));
}
1204
/*
 * Assert the enable state of @pipe by checking its transcoder's
 * PIPECONF. The read is guarded by a power-domain reference; if the
 * transcoder's power well is down the pipe is treated as disabled.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
                 enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                state = true;

        power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (wakeref) {
                u32 val = I915_READ(PIPECONF(cpu_transcoder));
                cur_state = !!(val & PIPECONF_ENABLE);

                intel_display_power_put(dev_priv, power_domain, wakeref);
        } else {
                /* power well off -> register unreadable, pipe is off */
                cur_state = false;
        }

        I915_STATE_WARN(cur_state != state,
             "pipe %c assertion failure (expected %s, current %s)\n",
                        pipe_name(pipe), onoff(state), onoff(cur_state));
}
1233
/*
 * Assert a plane's enable state as reported by its get_hw_state()
 * vfunc (which also reports the pipe the plane is assigned to).
 */
static void assert_plane(struct intel_plane *plane, bool state)
{
        enum pipe pipe;
        bool cur_state;

        cur_state = plane->get_hw_state(plane, &pipe);

        I915_STATE_WARN(cur_state != state,
                        "%s assertion failure (expected %s, current %s)\n",
                        plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
1248
/* Assert that every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_plane *plane;

        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
                assert_plane_disabled(plane);
}
1257
/*
 * Assert that vblank interrupts are off for @crtc. drm_crtc_vblank_get()
 * returning 0 means a reference was taken (vblanks enabled) — that is
 * the failure case, and the reference taken must then be dropped again.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
        if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
                drm_crtc_vblank_put(crtc);
}
1263
1264 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1265                                     enum pipe pipe)
1266 {
1267         u32 val;
1268         bool enabled;
1269
1270         val = I915_READ(PCH_TRANSCONF(pipe));
1271         enabled = !!(val & TRANS_ENABLE);
1272         I915_STATE_WARN(enabled,
1273              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1274              pipe_name(pipe));
1275 }
1276
/*
 * Assert that PCH DP port @port is not driving @pipe. Also warns for
 * the IBX erratum case of a disabled port still selecting transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe, enum port port,
                                   i915_reg_t dp_reg)
{
        enum pipe port_pipe;
        bool state;

        state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

        I915_STATE_WARN(state && port_pipe == pipe,
                        "PCH DP %c enabled on transcoder %c, should be disabled\n",
                        port_name(port), pipe_name(pipe));

        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
                        "IBX PCH DP %c still using transcoder B\n",
                        port_name(port));
}
1294
/*
 * Assert that PCH HDMI/SDVO port @port is not driving @pipe. Also warns
 * for the IBX case of a disabled port still selecting transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
                                     enum pipe pipe, enum port port,
                                     i915_reg_t hdmi_reg)
{
        enum pipe port_pipe;
        bool state;

        state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

        I915_STATE_WARN(state && port_pipe == pipe,
                        "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
                        port_name(port), pipe_name(pipe));

        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
                        "IBX PCH HDMI %c still using transcoder B\n",
                        port_name(port));
}
1312
/*
 * Assert that no PCH-attached output (DP, VGA/CRT, LVDS, HDMI/SDVO) is
 * still driving @pipe.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        enum pipe port_pipe;

        assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
        assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
        assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

        I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
                        port_pipe == pipe,
                        "PCH VGA enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));

        I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
                        port_pipe == pipe,
                        "PCH LVDS enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));

        /* PCH SDVOB multiplex with HDMIB */
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1337
/*
 * Write the DPLL control value and wait for the PLL to lock: 150 us
 * settle after the write, then poll DPLL_LOCK_VLV with a 1 ms timeout.
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
        POSTING_READ(DPLL(pipe));
        udelay(150);

        if (intel_wait_for_register(dev_priv,
                                    DPLL(pipe),
                                    DPLL_LOCK_VLV,
                                    DPLL_LOCK_VLV,
                                    1))
                DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1355
/*
 * Enable the VLV DPLL for @crtc. The VCO is only powered up when the
 * target state actually wants it (DPLL_VCO_ENABLE set); DPLL_MD is
 * programmed unconditionally.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _vlv_enable_pll(crtc, pipe_config);

        I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
        POSTING_READ(DPLL_MD(pipe));
}
1373
1374
/*
 * CHV PLL power-up sequence: enable the 10-bit clock via the DPIO
 * sideband first, wait >100ns, then write the DPLL control value and
 * poll for lock (1 ms timeout).
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 tmp;

        mutex_lock(&dev_priv->sb_lock);

        /* Enable back the 10bit clock to display controller */
        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        tmp |= DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

        mutex_unlock(&dev_priv->sb_lock);

        /*
         * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
         */
        udelay(1);

        /* Enable PLL */
        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

        /* Check PLL is locked */
        if (intel_wait_for_register(dev_priv,
                                    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
                                    1))
                DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1406
/*
 * Enable the CHV DPLL for @crtc. Pipes B and C need the
 * WaPixelRepeatModeFixForC0 chicken-bit dance because their DPLL_MD
 * register is not directly writable.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _chv_enable_pll(crtc, pipe_config);

        if (pipe != PIPE_A) {
                /*
                 * WaPixelRepeatModeFixForC0:chv
                 *
                 * DPLLCMD is AWOL. Use chicken bits to propagate
                 * the value from DPLLBMD to either pipe B or C.
                 */
                I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
                I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
                I915_WRITE(CBR4_VLV, 0);
                /* remember what we wrote, since DPLL_MD(pipe) can't be read */
                dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

                /*
                 * DPLLB VGA mode also seems to cause problems.
                 * We should always have it disabled.
                 */
                WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
        } else {
                I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
                POSTING_READ(DPLL_MD(pipe));
        }
}
1443
1444 static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
1445 {
1446         struct intel_crtc *crtc;
1447         int count = 0;
1448
1449         for_each_intel_crtc(&dev_priv->drm, crtc) {
1450                 count += crtc->base.state->active &&
1451                         intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
1452         }
1453
1454         return count;
1455 }
1456
/*
 * Enable a gen2-gen4 DPLL: program the control value (with an
 * intermediate zero write to force the P1/P2 dividers to take), wait
 * for the clocks to stabilize, then program DPLL_MD (gen4+) or rewrite
 * the control value (older gens) to latch the pixel multiplier.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        i915_reg_t reg = DPLL(crtc->pipe);
        u32 dpll = crtc_state->dpll_hw_state.dpll;
        int i;

        assert_pipe_disabled(dev_priv, crtc->pipe);

        /* PLL is protected by panel, make sure we can write it */
        if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
                assert_panel_unlocked(dev_priv, crtc->pipe);

        /* Enable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
                /*
                 * It appears to be important that we don't enable this
                 * for the current pipe before otherwise configuring the
                 * PLL. No idea how this should be handled if multiple
                 * DVO outputs are enabled simultaneosly.
                 */
                dpll |= DPLL_DVO_2X_MODE;
                I915_WRITE(DPLL(!crtc->pipe),
                           I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
        }

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        I915_WRITE(reg, 0);

        I915_WRITE(reg, dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(reg);
        udelay(150);

        if (INTEL_GEN(dev_priv) >= 4) {
                I915_WRITE(DPLL_MD(crtc->pipe),
                           crtc_state->dpll_hw_state.dpll_md);
        } else {
                /* The pixel multiplier can only be updated once the
                 * DPLL is enabled and the clocks are stable.
                 *
                 * So write it again.
                 */
                I915_WRITE(reg, dpll);
        }

        /* We do this three times for luck */
        for (i = 0; i < 3; i++) {
                I915_WRITE(reg, dpll);
                POSTING_READ(reg);
                udelay(150); /* wait for warmup */
        }
}
1516
/*
 * Disable a gen2-gen4 DPLL. On 830 the DVO 2x clock is cleared on both
 * PLLs when the last DVO pipe goes away, but the PLL itself is left
 * running (both pipes stay enabled on 830).
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Disable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev_priv) &&
            intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
            !intel_num_dvo_pipes(dev_priv)) {
                I915_WRITE(DPLL(PIPE_B),
                           I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
                I915_WRITE(DPLL(PIPE_A),
                           I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
        }

        /* Don't disable pipe or pipe PLLs if needed */
        if (IS_I830(dev_priv))
                return;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
        POSTING_READ(DPLL(pipe));
}
1543
/*
 * Disable the DPLL for @pipe on Valleyview.
 *
 * The PLL enable bit is dropped, but the integrated/reference clock
 * bits and VGA-mode-disable stay set; on pipes other than A the CRI
 * clock is also kept running.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* Flush the write so the PLL state change takes effect immediately */
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1559
/*
 * Disable the DPLL for @pipe on Cherryview.
 *
 * Like vlv_disable_pll() this keeps the reference clock (and, off pipe
 * A, the CRI clock) running, then additionally turns off the 10-bit
 * display clock in the DPIO PHY via a sideband write under sb_lock.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	/* Sideband (DPIO) accesses must be serialized by sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1585
/*
 * Wait (up to 1 ms) for @dport's PHY ready status bits to match
 * @expected_mask, WARNing on timeout with observed vs expected values.
 *
 * Ports B and C report in DPLL(0) (port C's field sits 4 bits above
 * port B's, hence the shift); port D reports in DPIO_PHY_STATUS.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C status field lives 4 bits above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(dev_priv,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->base.port),
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1618
1619 static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
1620 {
1621         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1622         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1623         enum pipe pipe = crtc->pipe;
1624         i915_reg_t reg;
1625         u32 val, pipeconf_val;
1626
1627         /* Make sure PCH DPLL is enabled */
1628         assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
1629
1630         /* FDI must be feeding us bits for PCH ports */
1631         assert_fdi_tx_enabled(dev_priv, pipe);
1632         assert_fdi_rx_enabled(dev_priv, pipe);
1633
1634         if (HAS_PCH_CPT(dev_priv)) {
1635                 /* Workaround: Set the timing override bit before enabling the
1636                  * pch transcoder. */
1637                 reg = TRANS_CHICKEN2(pipe);
1638                 val = I915_READ(reg);
1639                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1640                 I915_WRITE(reg, val);
1641         }
1642
1643         reg = PCH_TRANSCONF(pipe);
1644         val = I915_READ(reg);
1645         pipeconf_val = I915_READ(PIPECONF(pipe));
1646
1647         if (HAS_PCH_IBX(dev_priv)) {
1648                 /*
1649                  * Make the BPC in transcoder be consistent with
1650                  * that in pipeconf reg. For HDMI we must use 8bpc
1651                  * here for both 8bpc and 12bpc.
1652                  */
1653                 val &= ~PIPECONF_BPC_MASK;
1654                 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1655                         val |= PIPECONF_8BPC;
1656                 else
1657                         val |= pipeconf_val & PIPECONF_BPC_MASK;
1658         }
1659
1660         val &= ~TRANS_INTERLACE_MASK;
1661         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1662                 if (HAS_PCH_IBX(dev_priv) &&
1663                     intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
1664                         val |= TRANS_LEGACY_INTERLACED_ILK;
1665                 else
1666                         val |= TRANS_INTERLACED;
1667         else
1668                 val |= TRANS_PROGRESSIVE;
1669
1670         I915_WRITE(reg, val | TRANS_ENABLE);
1671         if (intel_wait_for_register(dev_priv,
1672                                     reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1673                                     100))
1674                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1675 }
1676
/*
 * Enable the single LPT PCH transcoder for @cpu_transcoder.
 *
 * LPT has only one PCH transcoder, always fed via pipe A's FDI RX and
 * the timing-override chicken bit on pipe A. Interlace mode is copied
 * from the CPU transcoder's PIPECONF. Errors out after 100 ms.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1708
/*
 * Disable the PCH transcoder for @pipe on ILK-class PCHs.
 *
 * FDI and the PCH ports must already be off. Waits up to 50 ms for the
 * transcoder to report disabled, then on CPT clears the timing-override
 * chicken bit that was set on enable.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1740
/*
 * Disable the single LPT PCH transcoder, waiting up to 50 ms for it to
 * report off, then clear the pipe A timing-override chicken bit set by
 * lpt_enable_pch_transcoder().
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1759
1760 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1761 {
1762         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1763
1764         if (HAS_PCH_LPT(dev_priv))
1765                 return PIPE_A;
1766         else
1767                 return crtc->pipe;
1768 }
1769
1770 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1771 {
1772         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1773
1774         /*
1775          * On i965gm the hardware frame counter reads
1776          * zero when the TV encoder is enabled :(
1777          */
1778         if (IS_I965GM(dev_priv) &&
1779             (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1780                 return 0;
1781
1782         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1783                 return 0xffffffff; /* full 32 bit counter */
1784         else if (INTEL_GEN(dev_priv) >= 3)
1785                 return 0xffffff; /* only 24 bits of frame count */
1786         else
1787                 return 0; /* Gen2 doesn't have a hardware frame counter */
1788 }
1789
1790 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1791 {
1792         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1793
1794         drm_crtc_set_max_vblank_count(&crtc->base,
1795                                       intel_crtc_max_vblank_count(crtc_state));
1796         drm_crtc_vblank_on(&crtc->base);
1797 }
1798
/*
 * Enable the pipe for @new_crtc_state.
 *
 * Planes must already be disabled. On gmch platforms the relevant PLL
 * (DSI or pipe PLL) must be running; on PCH configurations FDI must be
 * up. On 830 the pipe may legitimately already be enabled, in which
 * case this is a no-op.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1854
/*
 * Disable the pipe for @old_crtc_state.
 *
 * Planes must be disabled first. On 830 the pipe is left enabled (both
 * pipes always run there) but double-wide mode is still cleared. Waits
 * for the pipe to actually turn off when the enable bit was cleared.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	/* already disabled: nothing to do */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1892
/* Size of one tile in bytes: 2KiB on gen2, 4KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1897
/*
 * Width of one tile in bytes for @fb's @color_plane, given the fb's
 * tiling modifier. Linear "tiles" are one pixel wide (cpp bytes); CCS
 * AUX planes (color_plane == 1) always use 128-byte-wide tiles.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return cpp;
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (color_plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (color_plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
1945
1946 static unsigned int
1947 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1948 {
1949         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1950                 return 1;
1951         else
1952                 return intel_tile_size(to_i915(fb->dev)) /
1953                         intel_tile_width_bytes(fb, color_plane);
1954 }
1955
/*
 * Return the tile dimensions in pixel units: *tile_width in pixels
 * (bytes per tile row / cpp) and *tile_height in rows.
 */
static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
			    unsigned int *tile_width,
			    unsigned int *tile_height)
{
	unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
	unsigned int cpp = fb->format->cpp[color_plane];

	*tile_width = tile_width_bytes / cpp;
	*tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
}
1967
/* Round @height up to a whole number of tile rows for @color_plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
1976
1977 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1978 {
1979         unsigned int size = 0;
1980         int i;
1981
1982         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1983                 size += rot_info->plane[i].width * rot_info->plane[i].height;
1984
1985         return size;
1986 }
1987
/*
 * Initialize @view for scanning out @fb at @rotation: the normal GGTT
 * view for 0/180 degrees, the precomputed rotated view for 90/270.
 */
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
			const struct drm_framebuffer *fb,
			unsigned int rotation)
{
	view->type = I915_GGTT_VIEW_NORMAL;
	if (drm_rotation_90_or_270(rotation)) {
		view->type = I915_GGTT_VIEW_ROTATED;
		view->rotated = to_intel_framebuffer(fb)->rot_info;
	}
}
1999
/* GGTT alignment (bytes) required for cursor surfaces per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2011
/* GGTT alignment (bytes) required for linear scanout per platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2024
/*
 * GGTT alignment (bytes) required to scan out @fb's @color_plane,
 * depending on the tiling modifier. CCS AUX planes only need 4K.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if (color_plane == 1)
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
2051
2052 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2053 {
2054         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2055         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2056
2057         return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
2058 }
2059
/*
 * Pin @fb's backing object into the GGTT (via @view) for scanout and,
 * if @uses_fence allows and the vma is map-and-fenceable, attach a
 * fence, setting PLANE_HAS_FENCE in @out_flags.
 *
 * Returns the pinned vma (with an extra reference taken) on success or
 * an ERR_PTR. Caller must hold struct_mutex and eventually release via
 * intel_unpin_fb_vma(). Note the "err" label is reached on the success
 * path too; it only unwinds the pending_fb_pin count and wakeref.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(fb, 0);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(dev_priv);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	pinctl = 0;

	/* Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* pre-gen4 must have a fence: undo the pin and fail */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* success: hand an extra vma reference to the caller */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	intel_runtime_pm_put(dev_priv, wakeref);
	return vma;
}
2151
/*
 * Release a vma pinned by intel_pin_and_fence_fb_obj(): drop the fence
 * if PLANE_HAS_FENCE is set in @flags, unpin from the display plane and
 * put the extra vma reference. Caller must hold struct_mutex.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_vma_put(vma);
}
2161
2162 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2163                           unsigned int rotation)
2164 {
2165         if (drm_rotation_90_or_270(rotation))
2166                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2167         else
2168                 return fb->pitches[color_plane];
2169 }
2170
2171 /*
2172  * Convert the x/y offsets into a linear offset.
2173  * Only valid with 0/180 degree rotation, which is fine since linear
2174  * offset is only used with linear buffers on pre-hsw and tiled buffers
2175  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2176  */
2177 u32 intel_fb_xy_to_linear(int x, int y,
2178                           const struct intel_plane_state *state,
2179                           int color_plane)
2180 {
2181         const struct drm_framebuffer *fb = state->base.fb;
2182         unsigned int cpp = fb->format->cpp[color_plane];
2183         unsigned int pitch = state->color_plane[color_plane].stride;
2184
2185         return y * pitch + x * cpp;
2186 }
2187
2188 /*
2189  * Add the x/y offsets derived from fb->offsets[] to the user
2190  * specified plane src x/y offsets. The resulting x/y offsets
2191  * specify the start of scanout from the beginning of the gtt mapping.
2192  */
2193 void intel_add_fb_offsets(int *x, int *y,
2194                           const struct intel_plane_state *state,
2195                           int color_plane)
2196
2197 {
2198         const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2199         unsigned int rotation = state->base.rotation;
2200
2201         if (drm_rotation_90_or_270(rotation)) {
2202                 *x += intel_fb->rotated[color_plane].x;
2203                 *y += intel_fb->rotated[color_plane].y;
2204         } else {
2205                 *x += intel_fb->normal[color_plane].x;
2206                 *y += intel_fb->normal[color_plane].y;
2207         }
2208 }
2209
/*
 * Rebase a tile-aligned surface offset from @old_offset to @new_offset
 * by folding the (whole-tile) difference into *x/*y pixel offsets.
 *
 * Both offsets must be tile-size aligned and new_offset <= old_offset;
 * the tile delta is distributed over rows (y) and columns (x), then x
 * is reduced modulo the pitch in pixels to keep it as small as possible.
 * Returns @new_offset for caller convenience.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	tiles = (old_offset - new_offset) / tile_size;

	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}
2236
/*
 * Whether @modifier describes a linear surface. @color_plane is
 * currently unused but kept for per-plane modifier checks.
 */
static bool is_surface_linear(u64 modifier, int color_plane)
{
	return modifier == DRM_FORMAT_MOD_LINEAR;
}
2241
/*
 * Rebase an aligned surface offset from @old_offset to @new_offset,
 * folding the difference into *x/*y.
 *
 * For tiled surfaces this delegates the whole-tile delta to
 * intel_adjust_tile_offset(); for linear surfaces the byte delta is
 * converted directly into row (y) and pixel (x) offsets using @pitch
 * and the plane's cpp. Returns @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	WARN_ON(new_offset > old_offset);

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* rotated view: pitch is in tile rows, axes swapped */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2280
2281 /*
2282  * Adjust the tile offset by moving the difference into
2283  * the x/y offsets.
2284  */
2285 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2286                                              const struct intel_plane_state *state,
2287                                              int color_plane,
2288                                              u32 old_offset, u32 new_offset)
2289 {
2290         return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2291                                            state->base.rotation,
2292                                            state->color_plane[color_plane].stride,
2293                                            old_offset, new_offset);
2294 }
2295
2296 /*
2297  * Computes the aligned offset to the base tile and adjusts
2298  * x, y. bytes per pixel is assumed to be a power-of-two.
2299  *
2300  * In the 90/270 rotated case, x and y are assumed
2301  * to be already rotated to match the rotated GTT view, and
2302  * pitch is the tile_height aligned framebuffer height.
2303  *
2304  * This function is used when computing the derived information
2305  * under intel_framebuffer, so using any of that information
2306  * here is not allowed. Anything under drm_framebuffer can be
2307  * used. This is why the user has to pass in the pitch since it
2308  * is specified in the rotated orientation.
2309  */
2310 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2311                                         int *x, int *y,
2312                                         const struct drm_framebuffer *fb,
2313                                         int color_plane,
2314                                         unsigned int pitch,
2315                                         unsigned int rotation,
2316                                         u32 alignment)
2317 {
2318         unsigned int cpp = fb->format->cpp[color_plane];
2319         u32 offset, offset_aligned;
2320
2321         if (alignment)
2322                 alignment--;
2323
2324         if (!is_surface_linear(fb->modifier, color_plane)) {
2325                 unsigned int tile_size, tile_width, tile_height;
2326                 unsigned int tile_rows, tiles, pitch_tiles;
2327
2328                 tile_size = intel_tile_size(dev_priv);
2329                 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2330
2331                 if (drm_rotation_90_or_270(rotation)) {
2332                         pitch_tiles = pitch / tile_height;
2333                         swap(tile_width, tile_height);
2334                 } else {
2335                         pitch_tiles = pitch / (tile_width * cpp);
2336                 }
2337
2338                 tile_rows = *y / tile_height;
2339                 *y %= tile_height;
2340
2341                 tiles = *x / tile_width;
2342                 *x %= tile_width;
2343
2344                 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2345                 offset_aligned = offset & ~alignment;
2346
2347                 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2348                                          tile_size, pitch_tiles,
2349                                          offset, offset_aligned);
2350         } else {
2351                 offset = *y * pitch + *x * cpp;
2352                 offset_aligned = offset & ~alignment;
2353
2354                 *y = (offset & alignment) / pitch;
2355                 *x = ((offset & alignment) - *y * pitch) / cpp;
2356         }
2357
2358         return offset_aligned;
2359 }
2360
/*
 * Plane-state convenience wrapper around intel_compute_aligned_offset():
 * derives fb, rotation, stride and the required surface alignment from
 * the plane state, then computes the aligned base offset (adjusting x/y).
 */
static u32 intel_plane_compute_aligned_offset(int *x, int *y,
                                              const struct intel_plane_state *state,
                                              int color_plane)
{
        struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
        struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
        const struct drm_framebuffer *fb = state->base.fb;
        unsigned int rotation = state->base.rotation;
        int pitch = state->color_plane[color_plane].stride;
        u32 alignment;

        /* The cursor plane has its own alignment requirement. */
        if (intel_plane->id == PLANE_CURSOR)
                alignment = intel_cursor_alignment(dev_priv);
        else
                alignment = intel_surf_alignment(fb, color_plane);

        return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
                                            pitch, rotation, alignment);
}
2380
/*
 * Convert the fb->offset[] into x/y offsets.
 *
 * Returns 0 on success, -EINVAL if a tiled surface's plane offset is
 * not tile-size aligned, or -ERANGE if height * pitch + offset would
 * overflow a u32.
 */
static int intel_fb_offset_to_xy(int *x, int *y,
                                 const struct drm_framebuffer *fb,
                                 int color_plane)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);
        unsigned int height;

        /* Tiled surfaces require the plane offset to be tile-size aligned. */
        if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
            fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
                DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
                              fb->offsets[color_plane], color_plane);
                return -EINVAL;
        }

        height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
        height = ALIGN(height, intel_tile_height(fb, color_plane));

        /* Catch potential overflows early */
        if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
                            fb->offsets[color_plane])) {
                DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
                              fb->offsets[color_plane], fb->pitches[color_plane],
                              color_plane);
                return -ERANGE;
        }

        *x = 0;
        *y = 0;

        /*
         * Express the byte offset as x/y coordinates relative to the
         * start of the object (new_offset == 0).
         */
        intel_adjust_aligned_offset(x, y,
                                    fb, color_plane, DRM_MODE_ROTATE_0,
                                    fb->pitches[color_plane],
                                    fb->offsets[color_plane], 0);

        return 0;
}
2418
2419 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2420 {
2421         switch (fb_modifier) {
2422         case I915_FORMAT_MOD_X_TILED:
2423                 return I915_TILING_X;
2424         case I915_FORMAT_MOD_Y_TILED:
2425         case I915_FORMAT_MOD_Y_TILED_CCS:
2426                 return I915_TILING_Y;
2427         default:
2428                 return I915_TILING_NONE;
2429         }
2430 }
2431
/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 *  the cache-line pairs. The compression state of the cache-line pair
 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 *  cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
static const struct drm_format_info ccs_formats[] = {
        /* .cpp[1] == 1, .hsub == 8, .vsub == 16: one CCS byte per 8x16 pixels (see above) */
        { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
        { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
        { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
        { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
};
2452
2453 static const struct drm_format_info *
2454 lookup_format_info(const struct drm_format_info formats[],
2455                    int num_formats, u32 format)
2456 {
2457         int i;
2458
2459         for (i = 0; i < num_formats; i++) {
2460                 if (formats[i].format == format)
2461                         return &formats[i];
2462         }
2463
2464         return NULL;
2465 }
2466
2467 static const struct drm_format_info *
2468 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2469 {
2470         switch (cmd->modifier[0]) {
2471         case I915_FORMAT_MOD_Y_TILED_CCS:
2472         case I915_FORMAT_MOD_Yf_TILED_CCS:
2473                 return lookup_format_info(ccs_formats,
2474                                           ARRAY_SIZE(ccs_formats),
2475                                           cmd->pixel_format);
2476         default:
2477                 return NULL;
2478         }
2479 }
2480
2481 bool is_ccs_modifier(u64 modifier)
2482 {
2483         return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2484                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2485 }
2486
/*
 * Fill in the derived layout information for @fb: the per-color-plane
 * x/y offsets for the normal GTT view (intel_fb->normal[]), the
 * rotation_info and x/y offsets for the 90/270 rotated GTT view
 * (intel_fb->rot_info, intel_fb->rotated[]), and validate that the fb
 * fits within the backing object.
 *
 * Returns 0 on success, or a negative error code when the fb layout is
 * inconsistent (bad plane offsets, mismatched CCS coordinates, fb
 * wrapping past the fence stride, or fb larger than the bo).
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
                   struct drm_framebuffer *fb)
{
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *rot_info = &intel_fb->rot_info;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 gtt_offset_rotated = 0;     /* running tile offset in the rotated view */
        unsigned int max_size = 0;      /* bo size needed, in tiles */
        int i, num_planes = fb->format->num_planes;
        unsigned int tile_size = intel_tile_size(dev_priv);

        for (i = 0; i < num_planes; i++) {
                unsigned int width, height;
                unsigned int cpp, size;
                u32 offset;
                int x, y;
                int ret;

                cpp = fb->format->cpp[i];
                width = drm_framebuffer_plane_width(fb->width, fb, i);
                height = drm_framebuffer_plane_height(fb->height, fb, i);

                ret = intel_fb_offset_to_xy(&x, &y, fb, i);
                if (ret) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
                        return ret;
                }

                /* Plane 1 of a CCS fb is the compression status surface. */
                if (is_ccs_modifier(fb->modifier) && i == 1) {
                        int hsub = fb->format->hsub;
                        int vsub = fb->format->vsub;
                        int tile_width, tile_height;
                        int main_x, main_y;
                        int ccs_x, ccs_y;

                        intel_tile_dims(fb, i, &tile_width, &tile_height);
                        /* Scale CCS tile dims to main surface pixel units. */
                        tile_width *= hsub;
                        tile_height *= vsub;

                        ccs_x = (x * hsub) % tile_width;
                        ccs_y = (y * vsub) % tile_height;
                        main_x = intel_fb->normal[0].x % tile_width;
                        main_y = intel_fb->normal[0].y % tile_height;

                        /*
                         * CCS doesn't have its own x/y offset register, so the intra CCS tile
                         * x/y offsets must match between CCS and the main surface.
                         */
                        if (main_x != ccs_x || main_y != ccs_y) {
                                DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
                                              main_x, main_y,
                                              ccs_x, ccs_y,
                                              intel_fb->normal[0].x,
                                              intel_fb->normal[0].y,
                                              x, y);
                                return -EINVAL;
                        }
                }

                /*
                 * The fence (if used) is aligned to the start of the object
                 * so having the framebuffer wrap around across the edge of the
                 * fenced region doesn't really work. We have no API to configure
                 * the fence start offset within the object (nor could we probably
                 * on gen2/3). So it's just easier if we just require that the
                 * fb layout agrees with the fence layout. We already check that the
                 * fb stride matches the fence stride elsewhere.
                 */
                if (i == 0 && i915_gem_object_is_tiled(obj) &&
                    (x + width) * cpp > fb->pitches[i]) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
                        return -EINVAL;
                }

                /*
                 * First pixel of the framebuffer from
                 * the start of the normal gtt mapping.
                 */
                intel_fb->normal[i].x = x;
                intel_fb->normal[i].y = y;

                /* Tile-align the base offset; x/y absorb the remainder. */
                offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
                                                      fb->pitches[i],
                                                      DRM_MODE_ROTATE_0,
                                                      tile_size);
                offset /= tile_size;

                if (!is_surface_linear(fb->modifier, i)) {
                        unsigned int tile_width, tile_height;
                        unsigned int pitch_tiles;
                        struct drm_rect r;

                        intel_tile_dims(fb, i, &tile_width, &tile_height);

                        rot_info->plane[i].offset = offset;
                        rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
                        rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
                        rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

                        intel_fb->rotated[i].pitch =
                                rot_info->plane[i].height * tile_height;

                        /* how many tiles does this plane need */
                        size = rot_info->plane[i].stride * rot_info->plane[i].height;
                        /*
                         * If the plane isn't horizontally tile aligned,
                         * we need one more tile.
                         */
                        if (x != 0)
                                size++;

                        /* rotate the x/y offsets to match the GTT view */
                        r.x1 = x;
                        r.y1 = y;
                        r.x2 = x + width;
                        r.y2 = y + height;
                        drm_rect_rotate(&r,
                                        rot_info->plane[i].width * tile_width,
                                        rot_info->plane[i].height * tile_height,
                                        DRM_MODE_ROTATE_270);
                        x = r.x1;
                        y = r.y1;

                        /* rotate the tile dimensions to match the GTT view */
                        pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
                        swap(tile_width, tile_height);

                        /*
                         * We only keep the x/y offsets, so push all of the
                         * gtt offset into the x/y offsets.
                         */
                        intel_adjust_tile_offset(&x, &y,
                                                 tile_width, tile_height,
                                                 tile_size, pitch_tiles,
                                                 gtt_offset_rotated * tile_size, 0);

                        gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

                        /*
                         * First pixel of the framebuffer from
                         * the start of the rotated gtt mapping.
                         */
                        intel_fb->rotated[i].x = x;
                        intel_fb->rotated[i].y = y;
                } else {
                        /* Linear plane: size in tiles of the remaining span. */
                        size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
                                            x * cpp, tile_size);
                }

                /* how many tiles in total needed in the bo */
                max_size = max(max_size, offset + size);
        }

        if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
                DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
                              mul_u32_u32(max_size, tile_size), obj->base.size);
                return -EINVAL;
        }

        return 0;
}
2651
/*
 * Translate a DISPPLANE_* pixel format (as read from the plane control
 * register) into a DRM fourcc. Note the default label is deliberately
 * fused with the BGRX888 case: unknown values map to XRGB8888.
 */
static int i9xx_format_to_fourcc(int format)
{
        switch (format) {
        case DISPPLANE_8BPP:
                return DRM_FORMAT_C8;
        case DISPPLANE_BGRX555:
                return DRM_FORMAT_XRGB1555;
        case DISPPLANE_BGRX565:
                return DRM_FORMAT_RGB565;
        default:
        case DISPPLANE_BGRX888:
                return DRM_FORMAT_XRGB8888;
        case DISPPLANE_RGBX888:
                return DRM_FORMAT_XBGR8888;
        case DISPPLANE_BGRX101010:
                return DRM_FORMAT_XRGB2101010;
        case DISPPLANE_RGBX101010:
                return DRM_FORMAT_XBGR2101010;
        }
}
2672
/*
 * Translate a SKL+ PLANE_CTL_FORMAT_* value into a DRM fourcc.
 *
 * @rgb_order: RGB channel order bit from the plane control register;
 *             selects the BGR-ordered fourcc variants.
 * @alpha: whether the plane uses per-pixel alpha; selects A* over X*
 *         variants for the 8888 and 16161616F formats.
 *
 * Note the default label is deliberately fused with the XRGB_8888
 * case: unknown values fall back to the 8888 handling.
 */
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
        switch (format) {
        case PLANE_CTL_FORMAT_RGB_565:
                return DRM_FORMAT_RGB565;
        case PLANE_CTL_FORMAT_NV12:
                return DRM_FORMAT_NV12;
        case PLANE_CTL_FORMAT_P010:
                return DRM_FORMAT_P010;
        case PLANE_CTL_FORMAT_P012:
                return DRM_FORMAT_P012;
        case PLANE_CTL_FORMAT_P016:
                return DRM_FORMAT_P016;
        case PLANE_CTL_FORMAT_Y210:
                return DRM_FORMAT_Y210;
        case PLANE_CTL_FORMAT_Y212:
                return DRM_FORMAT_Y212;
        case PLANE_CTL_FORMAT_Y216:
                return DRM_FORMAT_Y216;
        case PLANE_CTL_FORMAT_Y410:
                return DRM_FORMAT_XVYU2101010;
        case PLANE_CTL_FORMAT_Y412:
                return DRM_FORMAT_XVYU12_16161616;
        case PLANE_CTL_FORMAT_Y416:
                return DRM_FORMAT_XVYU16161616;
        default:
        case PLANE_CTL_FORMAT_XRGB_8888:
                if (rgb_order) {
                        if (alpha)
                                return DRM_FORMAT_ABGR8888;
                        else
                                return DRM_FORMAT_XBGR8888;
                } else {
                        if (alpha)
                                return DRM_FORMAT_ARGB8888;
                        else
                                return DRM_FORMAT_XRGB8888;
                }
        case PLANE_CTL_FORMAT_XRGB_2101010:
                if (rgb_order)
                        return DRM_FORMAT_XBGR2101010;
                else
                        return DRM_FORMAT_XRGB2101010;
        case PLANE_CTL_FORMAT_XRGB_16161616F:
                if (rgb_order) {
                        if (alpha)
                                return DRM_FORMAT_ABGR16161616F;
                        else
                                return DRM_FORMAT_XBGR16161616F;
                } else {
                        if (alpha)
                                return DRM_FORMAT_ARGB16161616F;
                        else
                                return DRM_FORMAT_XRGB16161616F;
                }
        }
}
2730
2731 static bool
2732 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2733                               struct intel_initial_plane_config *plane_config)
2734 {
2735         struct drm_device *dev = crtc->base.dev;
2736         struct drm_i915_private *dev_priv = to_i915(dev);
2737         struct drm_i915_gem_object *obj = NULL;
2738         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2739         struct drm_framebuffer *fb = &plane_config->fb->base;
2740         u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2741         u32 size_aligned = round_up(plane_config->base + plane_config->size,
2742                                     PAGE_SIZE);
2743
2744         size_aligned -= base_aligned;
2745
2746         if (plane_config->size == 0)
2747                 return false;
2748
2749         /* If the FB is too big, just don't use it since fbdev is not very
2750          * important and we should probably use that space with FBC or other
2751          * features. */
2752         if (size_aligned * 2 > dev_priv->stolen_usable_size)
2753                 return false;
2754
2755         switch (fb->modifier) {
2756         case DRM_FORMAT_MOD_LINEAR:
2757         case I915_FORMAT_MOD_X_TILED:
2758         case I915_FORMAT_MOD_Y_TILED:
2759                 break;
2760         default:
2761                 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
2762                                  fb->modifier);
2763                 return false;
2764         }
2765
2766         mutex_lock(&dev->struct_mutex);
2767         obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
2768                                                              base_aligned,
2769                                                              base_aligned,
2770                                                              size_aligned);
2771         mutex_unlock(&dev->struct_mutex);
2772         if (!obj)
2773                 return false;
2774
2775         switch (plane_config->tiling) {
2776         case I915_TILING_NONE:
2777                 break;
2778         case I915_TILING_X:
2779         case I915_TILING_Y:
2780                 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
2781                 break;
2782         default:
2783                 MISSING_CASE(plane_config->tiling);
2784                 return false;
2785         }
2786
2787         mode_cmd.pixel_format = fb->format->format;
2788         mode_cmd.width = fb->width;
2789         mode_cmd.height = fb->height;
2790         mode_cmd.pitches[0] = fb->pitches[0];
2791         mode_cmd.modifier[0] = fb->modifier;
2792         mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2793
2794         if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
2795                 DRM_DEBUG_KMS("intel fb init failed\n");
2796                 goto out_unref_obj;
2797         }
2798
2799
2800         DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2801         return true;
2802
2803 out_unref_obj:
2804         i915_gem_object_put(obj);
2805         return false;
2806 }
2807
2808 static void
2809 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2810                         struct intel_plane_state *plane_state,
2811                         bool visible)
2812 {
2813         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2814
2815         plane_state->base.visible = visible;
2816
2817         if (visible)
2818                 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
2819         else
2820                 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
2821 }
2822
/* Rebuild crtc_state->active_planes from the (reliable) plane_mask. */
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
        struct drm_plane *plane;

        /*
         * Active_planes aliases if multiple "primary" or cursor planes
         * have been used on the same (or wrong) pipe. plane_mask uses
         * unique ids, hence we can use that to reconstruct active_planes.
         */
        crtc_state->active_planes = 0;

        drm_for_each_plane_mask(plane, &dev_priv->drm,
                                crtc_state->base.plane_mask)
                crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}
2839
/*
 * Disable @plane on @crtc outside of an atomic commit, keeping the
 * software state (visibility, plane_mask, active_planes) consistent
 * with the hardware before calling the plane's disable hook.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
                                         struct intel_plane *plane)
{
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_plane_state *plane_state =
                to_intel_plane_state(plane->base.state);

        DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
                      plane->base.base.id, plane->base.name,
                      crtc->base.base.id, crtc->base.name);

        intel_set_plane_visible(crtc_state, plane_state, false);
        fixup_active_planes(crtc_state);

        /* The primary plane needs extra pre-disable work. */
        if (plane->id == PLANE_PRIMARY)
                intel_pre_disable_primary_noatomic(&crtc->base);

        trace_intel_disable_plane(&plane->base, crtc);
        plane->disable_plane(plane, crtc_state);
}
2861
/*
 * Take over the firmware-programmed framebuffer for @intel_crtc's
 * primary plane: either wrap it in a new stolen-memory object, or
 * share an fb already reconstructed for another CRTC at the same GTT
 * base. If neither works, disable the primary plane so we don't carry
 * a visible plane with a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct drm_i915_gem_object *obj;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
        struct intel_plane *intel_plane = to_intel_plane(primary);
        struct intel_plane_state *intel_state =
                to_intel_plane_state(plane_state);
        struct drm_framebuffer *fb;

        if (!plane_config->fb)
                return;

        if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
                fb = &plane_config->fb->base;
                goto valid_fb;
        }

        kfree(plane_config->fb);

        /*
         * Failed to alloc the obj, check to see if we should share
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
                struct intel_plane_state *state;

                if (c == &intel_crtc->base)
                        continue;

                if (!to_intel_crtc(c)->active)
                        continue;

                state = to_intel_plane_state(c->primary->state);
                if (!state->vma)
                        continue;

                /* Same GTT base => same BIOS fb; take a reference. */
                if (intel_plane_ggtt_offset(state) == plane_config->base) {
                        fb = state->base.fb;
                        drm_framebuffer_get(fb);
                        goto valid_fb;
                }
        }

        /*
         * We've failed to reconstruct the BIOS FB.  Current display state
         * indicates that the primary plane is visible, but has a NULL FB,
         * which will lead to problems later if we don't fix it up.  The
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
        intel_plane_disable_noatomic(intel_crtc, intel_plane);

        return;

valid_fb:
        intel_state->base.rotation = plane_config->rotation;
        intel_fill_fb_ggtt_view(&intel_state->view, fb,
                                intel_state->base.rotation);
        intel_state->color_plane[0].stride =
                intel_fb_pitch(fb, 0, intel_state->base.rotation);

        mutex_lock(&dev->struct_mutex);
        intel_state->vma =
                intel_pin_and_fence_fb_obj(fb,
                                           &intel_state->view,
                                           intel_plane_uses_fence(intel_state),
                                           &intel_state->flags);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(intel_state->vma)) {
                DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
                          intel_crtc->pipe, PTR_ERR(intel_state->vma));

                /* Drop our fb reference and give up on the boot fb. */
                intel_state->vma = NULL;
                drm_framebuffer_put(fb);
                return;
        }

        obj = intel_fb_obj(fb);
        intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

        /* Full-fb src/dst rectangles for the takeover plane state. */
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
        plane_state->src_h = fb->height << 16;

        plane_state->crtc_x = 0;
        plane_state->crtc_y = 0;
        plane_state->crtc_w = fb->width;
        plane_state->crtc_h = fb->height;

        intel_state->base.src = drm_plane_state_src(plane_state);
        intel_state->base.dst = drm_plane_state_dest(plane_state);

        if (i915_gem_object_is_tiled(obj))
                dev_priv->preserve_bios_swizzle = true;

        plane_state->fb = fb;
        plane_state->crtc = &intel_crtc->base;

        atomic_or(to_intel_plane(primary)->frontbuffer_bit,
                  &obj->frontbuffer_bits);
}
2970
/*
 * Maximum source width (in pixels) for a SKL+ plane, as a function of
 * the fb modifier and bytes per pixel. Unknown cpp/modifier values are
 * flagged via MISSING_CASE and fall back to the conservative 2048.
 */
static int skl_max_plane_width(const struct drm_framebuffer *fb,
                               int color_plane,
                               unsigned int rotation)
{
        int cpp = fb->format->cpp[color_plane];

        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
        case I915_FORMAT_MOD_X_TILED:
                switch (cpp) {
                case 8:
                        return 4096;
                case 4:
                case 2:
                case 1:
                        return 8192;
                default:
                        MISSING_CASE(cpp);
                        break;
                }
                break;
        case I915_FORMAT_MOD_Y_TILED_CCS:
        case I915_FORMAT_MOD_Yf_TILED_CCS:
                /* FIXME AUX plane? */
                /* fall through - CCS uses the same limits as plain Y/Yf. */
        case I915_FORMAT_MOD_Y_TILED:
        case I915_FORMAT_MOD_Yf_TILED:
                switch (cpp) {
                case 8:
                        return 2048;
                case 4:
                        return 4096;
                case 2:
                case 1:
                        return 8192;
                default:
                        MISSING_CASE(cpp);
                        break;
                }
                break;
        default:
                MISSING_CASE(fb->modifier);
        }

        return 2048;
}
3016
/*
 * The CCS (AUX) surface has no x/y offset registers of its own, so its
 * intra-tile x/y must match the main surface's (see intel_fill_fb_info).
 * Walk the AUX offset backwards one alignment step at a time, folding
 * the difference into aux x/y, until the coordinates match the main
 * surface's at @main_x/@main_y/@main_offset.
 *
 * Returns true (and updates color_plane[1]) when matching coordinates
 * were found, false otherwise.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
                                           int main_x, int main_y, u32 main_offset)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        int hsub = fb->format->hsub;
        int vsub = fb->format->vsub;
        int aux_x = plane_state->color_plane[1].x;
        int aux_y = plane_state->color_plane[1].y;
        u32 aux_offset = plane_state->color_plane[1].offset;
        u32 alignment = intel_surf_alignment(fb, 1);

        /* AUX offset must stay >= main offset, and aux_y can only grow. */
        while (aux_offset >= main_offset && aux_y <= main_y) {
                int x, y;

                if (aux_x == main_x && aux_y == main_y)
                        break;

                if (aux_offset == 0)
                        break;

                /* Adjust in main-surface units; hsub/vsub scale to AUX. */
                x = aux_x / hsub;
                y = aux_y / vsub;
                aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
                                                               aux_offset, aux_offset - alignment);
                aux_x = x * hsub + aux_x % hsub;
                aux_y = y * vsub + aux_y % vsub;
        }

        if (aux_x != main_x || aux_y != main_y)
                return false;

        plane_state->color_plane[1].offset = aux_offset;
        plane_state->color_plane[1].x = aux_x;
        plane_state->color_plane[1].y = aux_y;

        return true;
}
3054
/*
 * Compute the aligned surface offset and intra-tile x/y for the main
 * (color plane 0) surface on skl+, honouring the hardware constraints:
 * max plane size, non-negative AUX offset distance, X-tile stride
 * limits, and CCS main/AUX coordinate matching. The AUX surface
 * (color_plane[1]) must already be set up by the caller.
 *
 * Returns 0 on success, -EINVAL if no valid layout could be found.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->base.src) >> 16;
	int h = drm_rect_height(&plane_state->base.src) >> 16;
	int max_width = skl_max_plane_width(fb, 0, rotation);
	int max_height = 4096;
	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* step the offset back until x fits within the stride */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	return 0;
}
3131
/*
 * Set up the chroma (CbCr, color plane 1) surface for planar YUV
 * formats such as NV12/P01x: compute its aligned offset and x/y,
 * and validate the chroma plane size against hardware limits.
 *
 * Returns 0 on success, -EINVAL if the chroma source size is too big.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int max_width = skl_max_plane_width(fb, 1, rotation);
	int max_height = 4096;
	/*
	 * src is 16.16 fixed point; shifting by 17 both drops the
	 * fraction and halves the value for the 2x2 subsampled
	 * chroma plane.
	 */
	int x = plane_state->base.src.x1 >> 17;
	int y = plane_state->base.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->base.src) >> 17;
	int h = drm_rect_height(&plane_state->base.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x;
	plane_state->color_plane[1].y = y;

	return 0;
}
3160
/*
 * Set up the CCS AUX surface (color plane 1): derive its x/y from the
 * main surface source coordinates using the format's subsampling
 * factors, and compute an aligned offset. The subsampling remainder is
 * folded back into x/y so skl_check_main_ccs_coordinates() can compare
 * them against the main surface later.
 *
 * Always returns 0.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int src_x = plane_state->base.src.x1 >> 16;
	int src_y = plane_state->base.src.y1 >> 16;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int x = src_x / hsub;
	int y = src_y / vsub;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x * hsub + src_x % hsub;
	plane_state->color_plane[1].y = y * vsub + src_y % vsub;

	return 0;
}
3181
/*
 * Validate and finalize the surface layout (view, strides, per-plane
 * offsets and x/y) for a skl+ plane state. The AUX surface is handled
 * before the main surface because the main surface offset computation
 * depends on the AUX offset.
 *
 * Returns 0 on success or a negative error code.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int ret;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
	plane_state->color_plane[1].stride = intel_fb_pitch(fb, 1, rotation);

	ret = intel_plane_check_stride(plane_state);
	if (ret)
		return ret;

	if (!plane_state->base.visible)
		return 0;

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->base.src,
				fb->width << 16, fb->height << 16,
				DRM_MODE_ROTATE_270);

	/*
	 * Handle the AUX surface first since
	 * the main surface setup depends on it.
	 */
	if (is_planar_yuv_format(fb->format->format)) {
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	} else if (is_ccs_modifier(fb->modifier)) {
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	} else {
		/*
		 * No AUX surface: poison the offset with an
		 * out-of-range value so misuse is noticeable.
		 */
		plane_state->color_plane[1].offset = ~0xfff;
		plane_state->color_plane[1].x = 0;
		plane_state->color_plane[1].y = 0;
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
3229
3230 unsigned int
3231 i9xx_plane_max_stride(struct intel_plane *plane,
3232                       u32 pixel_format, u64 modifier,
3233                       unsigned int rotation)
3234 {
3235         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3236
3237         if (!HAS_GMCH(dev_priv)) {
3238                 return 32*1024;
3239         } else if (INTEL_GEN(dev_priv) >= 4) {
3240                 if (modifier == I915_FORMAT_MOD_X_TILED)
3241                         return 16*1024;
3242                 else
3243                         return 32*1024;
3244         } else if (INTEL_GEN(dev_priv) >= 3) {
3245                 if (modifier == I915_FORMAT_MOD_X_TILED)
3246                         return 8*1024;
3247                 else
3248                         return 16*1024;
3249         } else {
3250                 if (plane->i9xx_plane == PLANE_C)
3251                         return 4*1024;
3252                 else
3253                         return 8*1024;
3254         }
3255 }
3256
3257 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3258 {
3259         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3260         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3261         u32 dspcntr = 0;
3262
3263         dspcntr |= DISPPLANE_GAMMA_ENABLE;
3264
3265         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3266                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3267
3268         if (INTEL_GEN(dev_priv) < 5)
3269                 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3270
3271         return dspcntr;
3272 }
3273
/*
 * Compute the plane-state-dependent part of DSPCNTR for pre-skl
 * primary planes: pixel format, tiling and rotation/mirror bits.
 *
 * Returns 0 (plane disabled) for an unsupported pixel format.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* Map the DRM fourcc to the hardware pixel format field */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	/* X-tiling on the primary plane only exists on gen4+ */
	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
3328
/*
 * Validate and finalize the surface layout (view, stride, offset, x/y)
 * for a pre-skl primary plane state. On platforms without automatic
 * rotation handling (everything except HSW/BDW) the source coordinates
 * are adjusted so the hardware starts scanning out from the correct
 * corner for 180 degree rotation / X mirroring.
 *
 * Returns 0 on success or a negative error code.
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int src_x = plane_state->base.src.x1 >> 16;
	int src_y = plane_state->base.src.y1 >> 16;
	u32 offset;
	int ret;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);

	ret = intel_plane_check_stride(plane_state);
	if (ret)
		return ret;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	/* gen2/3 have no DSPSURF; they scan out from DSPADDR directly */
	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
							    plane_state, 0);
	else
		offset = 0;

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
		int src_h = drm_rect_height(&plane_state->base.src) >> 16;

		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
3374
/*
 * Atomic check for pre-skl primary planes: validate rotation, clipping
 * (no scaling supported), source coordinates and surface layout, then
 * precompute the DSPCNTR value for the commit phase.
 *
 * Returns 0 on success or a negative error code.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	int ret;

	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
						  &crtc_state->base,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);
	if (ret)
		return ret;

	if (!plane_state->base.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}
3408
/*
 * Program a pre-skl primary plane from a precomputed plane state.
 * All register writes happen under the uncore lock with the _FW
 * accessors, and the surface address register is written last so the
 * update latches atomically on the next vblank.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	/* combine the precomputed plane bits with the crtc-dependent bits */
	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* gen2/3 have no tiled surface offset; scan out from the linear offset */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary plane has its own pos/size registers */
		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3475
/*
 * Disable a pre-skl primary plane. DSPCNTR is still programmed with
 * the crtc-dependent bits (see comment below) rather than plain zero,
 * and the surface address write latches the disable.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3506
/*
 * Read back whether a pre-skl primary plane is enabled in hardware,
 * and which pipe it is currently assigned to (on pre-ilk the pipe is
 * taken from the DSPCNTR pipe select bits; on ilk+ the assignment is
 * fixed). Returns false if the plane's power domain is off.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
3541
3542 static u32
3543 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
3544 {
3545         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3546                 return 64;
3547         else
3548                 return intel_tile_width_bytes(fb, color_plane);
3549 }
3550
3551 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3552 {
3553         struct drm_device *dev = intel_crtc->base.dev;
3554         struct drm_i915_private *dev_priv = to_i915(dev);
3555
3556         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3557         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3558         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3559 }
3560
3561 /*
3562  * This function detaches (aka. unbinds) unused scalers in hardware
3563  */
3564 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
3565 {
3566         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3567         const struct intel_crtc_scaler_state *scaler_state =
3568                 &crtc_state->scaler_state;
3569         int i;
3570
3571         /* loop through and disable scalers that aren't in use */
3572         for (i = 0; i < intel_crtc->num_scalers; i++) {
3573                 if (!scaler_state->scalers[i].in_use)
3574                         skl_detach_scaler(intel_crtc, i);
3575         }
3576 }
3577
3578 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
3579                                           int color_plane, unsigned int rotation)
3580 {
3581         /*
3582          * The stride is either expressed as a multiple of 64 bytes chunks for
3583          * linear buffers or in number of tiles for tiled buffers.
3584          */
3585         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3586                 return 64;
3587         else if (drm_rotation_90_or_270(rotation))
3588                 return intel_tile_height(fb, color_plane);
3589         else
3590                 return intel_tile_width_bytes(fb, color_plane);
3591 }
3592
3593 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
3594                      int color_plane)
3595 {
3596         const struct drm_framebuffer *fb = plane_state->base.fb;
3597         unsigned int rotation = plane_state->base.rotation;
3598         u32 stride = plane_state->color_plane[color_plane].stride;
3599
3600         if (color_plane >= fb->format->num_planes)
3601                 return 0;
3602
3603         return stride / skl_plane_stride_mult(fb, color_plane, rotation);
3604 }
3605
/*
 * Translate a DRM fourcc into the skl+ PLANE_CTL format (and byte
 * order) bits. Returns 0 for unsupported formats.
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
3663
/*
 * Translate the plane's pixel blend mode into skl/kbl PLANE_CTL alpha
 * bits (glk+ uses PLANE_COLOR_CTL instead, see
 * glk_plane_color_ctl_alpha()). Formats without alpha always disable
 * alpha blending.
 */
static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
{
	if (!plane_state->base.fb->format->has_alpha)
		return PLANE_CTL_ALPHA_DISABLE;

	switch (plane_state->base.pixel_blend_mode) {
	case DRM_MODE_BLEND_PIXEL_NONE:
		return PLANE_CTL_ALPHA_DISABLE;
	case DRM_MODE_BLEND_PREMULTI:
		return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_MODE_BLEND_COVERAGE:
		return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
	default:
		MISSING_CASE(plane_state->base.pixel_blend_mode);
		return PLANE_CTL_ALPHA_DISABLE;
	}
}
3681
/*
 * glk+ counterpart of skl_plane_ctl_alpha(): translate the plane's
 * pixel blend mode into PLANE_COLOR_CTL alpha bits. Formats without
 * alpha always disable alpha blending.
 */
static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
{
	if (!plane_state->base.fb->format->has_alpha)
		return PLANE_COLOR_ALPHA_DISABLE;

	switch (plane_state->base.pixel_blend_mode) {
	case DRM_MODE_BLEND_PIXEL_NONE:
		return PLANE_COLOR_ALPHA_DISABLE;
	case DRM_MODE_BLEND_PREMULTI:
		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
	case DRM_MODE_BLEND_COVERAGE:
		return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
	default:
		MISSING_CASE(plane_state->base.pixel_blend_mode);
		return PLANE_COLOR_ALPHA_DISABLE;
	}
}
3699
/*
 * Translate a framebuffer modifier into skl+ PLANE_CTL tiling (and
 * render decompression) bits. Returns 0 for linear and for
 * unrecognized modifiers.
 */
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
3721
/*
 * Translate a DRM rotation (one DRM_MODE_ROTATE_* value) into skl+
 * PLANE_CTL rotation bits. Returns 0 for 0 degrees and for
 * unrecognized values.
 */
static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	case DRM_MODE_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotate);
	}

	return 0;
}
3743
/*
 * Translate a DRM reflection (DRM_MODE_REFLECT_* mask) into cnl+
 * PLANE_CTL flip bits. Only horizontal flip is supported in hardware;
 * DRM_MODE_REFLECT_Y deliberately falls through to MISSING_CASE().
 */
static u32 cnl_plane_ctl_flip(unsigned int reflect)
{
	switch (reflect) {
	case 0:
		break;
	case DRM_MODE_REFLECT_X:
		return PLANE_CTL_FLIP_HORIZONTAL;
	case DRM_MODE_REFLECT_Y:
	default:
		MISSING_CASE(reflect);
	}

	return 0;
}
3758
3759 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3760 {
3761         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3762         u32 plane_ctl = 0;
3763
3764         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3765                 return plane_ctl;
3766
3767         plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
3768         plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
3769
3770         return plane_ctl;
3771 }
3772
/*
 * Compute the plane-state-dependent part of PLANE_CTL for skl+:
 * format, tiling, rotation/flip, color keying, and (pre-glk only)
 * alpha blending and YUV->RGB conversion bits.
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/* On glk+ these bits live in PLANE_COLOR_CTL instead */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* Horizontal flip is only available on cnl+ */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
3811
3812 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
3813 {
3814         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3815         u32 plane_color_ctl = 0;
3816
3817         if (INTEL_GEN(dev_priv) >= 11)
3818                 return plane_color_ctl;
3819
3820         plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
3821         plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
3822
3823         return plane_color_ctl;
3824 }
3825
/*
 * Compute the plane-state-dependent part of PLANE_COLOR_CTL for glk+:
 * plane gamma disable, alpha mode, and for YUV formats either an
 * explicit YUV->RGB CSC mode or (for icl HDR planes) the programmable
 * input CSC.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		/* icl HDR planes use the programmable input CSC instead */
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
3852
/*
 * Restore the display after hw state readout, committing the previously
 * duplicated atomic @state (may be NULL) under the caller's acquire @ctx.
 * Used from the GPU reset and (presumably) resume paths.
 *
 * Returns 0 on success or a negative error code from the commit.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	i915_redisable_vga(to_i915(dev));

	/* Nothing to commit: the hw readout above is all that's needed. */
	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	/* All modeset locks are held by the caller, so -EDEADLK is a bug. */
	WARN_ON(ret == -EDEADLK);
	return ret;
}
3891
3892 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3893 {
3894         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
3895                 intel_has_gpu_reset(dev_priv));
3896 }
3897
/*
 * Prepare the display for an impending GPU reset: take all modeset
 * locks, duplicate the current atomic state for restoration in
 * intel_finish_reset(), and disable all CRTCs if the reset will
 * clobber the display. Bails out early when the reset leaves the
 * display untouched (unless the test modparam forces the path).
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		i915_gem_set_wedged(dev_priv);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Loop until all modeset locks are held, backing off on deadlock. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stash the duplicated state; intel_finish_reset() restores it. */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
3953
/*
 * Counterpart of intel_prepare_reset(): restore the display state that
 * was duplicated before the GPU reset, re-initialize the display
 * hardware if the reset clobbered it, and drop the modeset locks taken
 * in intel_prepare_reset().
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);
		intel_init_clock_gating(dev_priv);

		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	/* Paired with the set_bit() in intel_prepare_reset(). */
	clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
4007
4008 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4009 {
4010         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4011         enum pipe pipe = crtc->pipe;
4012         u32 tmp;
4013
4014         tmp = I915_READ(PIPE_CHICKEN(pipe));
4015
4016         /*
4017          * Display WA #1153: icl
4018          * enable hardware to bypass the alpha math
4019          * and rounding for per-pixel values 00 and 0xff
4020          */
4021         tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4022
4023         /*
4024          * W/A for underruns with linear/X-tiled with
4025          * WM1+ disabled.
4026          */
4027         tmp |= PM_FILL_MAINTAIN_DBUF_FULLNESS;
4028
4029         I915_WRITE(PIPE_CHICKEN(pipe), tmp);
4030 }
4031
/*
 * Reprogram pipe-level state that may change without a full modeset:
 * pipe source size, panel fitter, SKL+ bottom color and the icelake
 * chicken bits.
 */
static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
				     const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = new_crtc_state->base.mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((new_crtc_state->pipe_src_w - 1) << 16) |
		   (new_crtc_state->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(old_crtc_state);
	}

	/*
	 * We don't (yet) allow userspace to control the pipe background color,
	 * so force it to black, but apply pipe gamma and CSC so that its
	 * handling will match how we program our planes.
	 */
	if (INTEL_GEN(dev_priv) >= 9)
		I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
			   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
			   SKL_BOTTOM_COLOR_CSC_ENABLE);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
4080
/*
 * Switch the FDI TX/RX link from a training pattern to the normal
 * pattern once link training has completed, and enable enhanced
 * framing (plus error correction on IVB).
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/* clear-then-set keeps the pattern field in a known state */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
4121
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll up to 5 times for bit lock, acking it in the IIR. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll up to 5 times for symbol lock, acking it in the IIR. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
4215
/*
 * Voltage-swing / pre-emphasis combinations tried in order during
 * SNB (and IVB manual) FDI link training.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4222
/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Walk the vswing/pre-emphasis table, polling up to 5 times per
	 * level for bit lock before stepping to the next level. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same vswing walk as above, now polling for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4355
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each table entry is tried on two consecutive passes */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll up to 4 times for bit lock (read twice per pass). */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* Poll up to 4 times for symbol lock; success exits fully. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
4475
/*
 * Enable the FDI PLLs for @crtc_state's pipe: first the PCH FDI RX PLL
 * (switching its clock source from Rawclk to PCDclk), then the CPU FDI
 * TX PLL, with the documented warmup delays between each step.
 */
static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* mirror the pipe's bpc from PIPECONF into the FDI RX bpc field */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
4512
/*
 * Counterpart of ironlake_fdi_pll_enable(): switch the FDI RX clock
 * source back from PCDclk to Rawclk, then shut down the CPU FDI TX PLL
 * and the PCH FDI RX PLL, in that order, with settle delays.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
4542
/*
 * Disable the CPU FDI transmitter and PCH FDI receiver for @crtc's
 * pipe, then park the link back in training pattern 1 with the RX bpc
 * field kept in sync with PIPECONF.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
4595
4596 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4597 {
4598         struct drm_crtc *crtc;
4599         bool cleanup_done;
4600
4601         drm_for_each_crtc(crtc, &dev_priv->drm) {
4602                 struct drm_crtc_commit *commit;
4603                 spin_lock(&crtc->commit_lock);
4604                 commit = list_first_entry_or_null(&crtc->commit_list,
4605                                                   struct drm_crtc_commit, commit_entry);
4606                 cleanup_done = commit ?
4607                         try_wait_for_completion(&commit->cleanup_done) : true;
4608                 spin_unlock(&crtc->commit_lock);
4609
4610                 if (cleanup_done)
4611                         continue;
4612
4613                 drm_crtc_wait_one_vblank(crtc);
4614
4615                 return true;
4616         }
4617
4618         return false;
4619 }
4620
/*
 * Disable the LPT iCLKIP pixel clock: gate it via PIXCLK_GATE, then
 * set the SSC disable bit in SBI_SSCCTL6 over the sideband interface
 * (serialized by sb_lock).
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
4635
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Always disable first; re-enabled at the end after reprogramming. */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		/* Split the total divisor into an integer part (divsel)
		 * and a fractional phase increment (phaseinc). */
		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* Sideband (SBI) accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	/* Ungate the pixel clock now that the modulator is running. */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
4714
/*
 * Read back the currently programmed iCLKIP frequency in KHz, or 0 if
 * iCLKIP is gated or disabled.  Mirrors the divisor math used by
 * lpt_program_iclkip() in reverse.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	/* Pixel clock gated -> iCLKIP is not in use. */
	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	/* Sideband (SBI) accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Recombine integer and fractional parts into the total divisor
	 * (inverse of the split done when programming). */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
4751
/*
 * Copy the CPU transcoder's timing registers to the given PCH
 * transcoder, so both sides of the FDI link agree on the mode.
 */
static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
						enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* Horizontal timings. */
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	/* Vertical timings. */
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4775
/*
 * Set or clear the FDI B/C lane bifurcation bit in SOUTH_CHICKEN1.
 * No-op if the bit already matches @enable.  The FDI RX for pipes B
 * and C must be disabled while the bit is being changed (hence the
 * WARNs).
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
4795
4796 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
4797 {
4798         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4799         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4800
4801         switch (crtc->pipe) {
4802         case PIPE_A:
4803                 break;
4804         case PIPE_B:
4805                 if (crtc_state->fdi_lanes > 2)
4806                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
4807                 else
4808                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
4809
4810                 break;
4811         case PIPE_C:
4812                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
4813
4814                 break;
4815         default:
4816                 BUG();
4817         }
4818 }
4819
4820 /*
4821  * Finds the encoder associated with the given CRTC. This can only be
4822  * used when we know that the CRTC isn't feeding multiple encoders!
4823  */
4824 static struct intel_encoder *
4825 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
4826                            const struct intel_crtc_state *crtc_state)
4827 {
4828         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4829         const struct drm_connector_state *connector_state;
4830         const struct drm_connector *connector;
4831         struct intel_encoder *encoder = NULL;
4832         int num_encoders = 0;
4833         int i;
4834
4835         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4836                 if (connector_state->crtc != &crtc->base)
4837                         continue;
4838
4839                 encoder = to_intel_encoder(connector_state->best_encoder);
4840                 num_encoders++;
4841         }
4842
4843         WARN(num_encoders != 1, "%d encoders for pipe %c\n",
4844              num_encoders, pipe_name(crtc->pipe));
4845
4846         return encoder;
4847 }
4848
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(const struct intel_atomic_state *state,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	/* IVB shares FDI B/C lanes; pick bifurcation before training. */
	if (IS_IVYBRIDGE(dev_priv))
		ivybridge_update_fdi_bc_bifurcation(crtc_state);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		/* Route DPLL A or B to this transcoder, matching the
		 * shared DPLL chosen for this crtc. */
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc_state);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(crtc_state, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->base.adjusted_mode;
		/* Extract the BPC field from PIPECONF (bits 7:5). */
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		enum port port;

		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Only one encoder may feed this crtc here; its port
		 * selects the DP transcoder routing. */
		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
		WARN_ON(port < PORT_B || port > PORT_D);
		temp |= TRANS_DP_PORT_SEL(port);

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(crtc_state);
}
4940
/*
 * LPT variant of the PCH enable sequence: program iCLKIP, copy the
 * transcoder timings and enable the PCH transcoder.  This path always
 * uses the pipe A PCH transcoder.
 */
static void lpt_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4957
/*
 * Verify the pipe is actually running after a mode set by watching the
 * scanline counter (PIPEDSL) move; a counter that stays put means the
 * pipe is stuck.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Retry once before declaring the pipe stuck. */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
4971
4972 /*
4973  * The hardware phase 0.0 refers to the center of the pixel.
4974  * We want to start from the top/left edge which is phase
4975  * -0.5. That matches how the hardware calculates the scaling
4976  * factors (from top-left of the first pixel to bottom-right
4977  * of the last pixel, as opposed to the pixel centers).
4978  *
4979  * For 4:2:0 subsampled chroma planes we obviously have to
4980  * adjust that so that the chroma sample position lands in
4981  * the right spot.
4982  *
4983  * Note that for packed YCbCr 4:2:2 formats there is no way to
4984  * control chroma siting. The hardware simply replicates the
4985  * chroma samples for both of the luma samples, and thus we don't
4986  * actually get the expected MPEG2 chroma siting convention :(
4987  * The same behaviour is observed on pre-SKL platforms as well.
4988  *
4989  * Theory behind the formula (note that we ignore sub-pixel
4990  * source coordinates):
4991  * s = source sample position
4992  * d = destination sample position
4993  *
4994  * Downscaling 4:1:
4995  * -0.5
4996  * | 0.0
4997  * | |     1.5 (initial phase)
4998  * | |     |
4999  * v v     v
5000  * | s | s | s | s |
5001  * |       d       |
5002  *
5003  * Upscaling 1:4:
5004  * -0.5
5005  * | -0.375 (initial phase)
5006  * | |     0.0
5007  * | |     |
5008  * v v     v
5009  * |       s       |
5010  * | d | d | d | d |
5011  */
5012 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5013 {
5014         int phase = -0x8000;
5015         u16 trip = 0;
5016
5017         if (chroma_cosited)
5018                 phase += (sub - 1) * 0x8000 / sub;
5019
5020         phase += scale / (2 * sub);
5021
5022         /*
5023          * Hardware initial phase limited to [-0.5:1.5].
5024          * Since the max hardware scale factor is 3.0, we
5025          * should never actually excdeed 1.0 here.
5026          */
5027         WARN_ON(phase < -0x8000 || phase > 0x18000);
5028
5029         if (phase < 0)
5030                 phase = 0x10000 + phase;
5031         else
5032                 trip = PS_PHASE_TRIP;
5033
5034         return ((phase >> 2) & PS_PHASE_MASK) | trip;
5035 }
5036
/*
 * Stage (or release) a scaler assignment for one scaler user (the crtc
 * panel fitter or a plane) in @crtc_state.  Returns 0 on success or a
 * negative errno when the requested scaling cannot be supported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler bound to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* Planar YUV sources have a hardware minimum size for scaling. */
	if (format && is_planar_yuv_format(format->format) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (IS_GEN(dev_priv, 11) &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (!IS_GEN(dev_priv, 11) &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
5125
5126 /**
5127  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5128  *
5129  * @state: crtc's scaler state
5130  *
5131  * Return
5132  *     0 - scaler_usage updated successfully
5133  *    error - requested scaling cannot be supported or other error condition
5134  */
5135 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5136 {
5137         const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
5138         bool need_scaler = false;
5139
5140         if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5141                 need_scaler = true;
5142
5143         return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
5144                                  &state->scaler_state.scaler_id,
5145                                  state->pipe_src_w, state->pipe_src_h,
5146                                  adjusted_mode->crtc_hdisplay,
5147                                  adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5148 }
5149
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc state the plane belongs to
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;
	/* No fb or invisible plane -> release any scaler it holds. */
	bool force_detach = !fb || !plane_state->base.visible;
	bool need_scaler = false;

	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
	    fb && is_planar_yuv_format(fb->format->format))
		need_scaler = true;

	/* Source rect is 16.16 fixed point; shift to integer pixels. */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				drm_rect_width(&plane_state->base.src) >> 16,
				drm_rect_height(&plane_state->base.src) >> 16,
				drm_rect_width(&plane_state->base.dst),
				drm_rect_height(&plane_state->base.dst),
				fb ? fb->format : NULL, need_scaler);

	/* Done if staging failed or no scaler was assigned. */
	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
			      intel_plane->base.base.id,
			      intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_P010:
	case DRM_FORMAT_P012:
	case DRM_FORMAT_P016:
	case DRM_FORMAT_Y210:
	case DRM_FORMAT_Y212:
	case DRM_FORMAT_Y216:
	case DRM_FORMAT_XVYU2101010:
	case DRM_FORMAT_XVYU12_16161616:
	case DRM_FORMAT_XVYU16161616:
		break;
	default:
		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			      intel_plane->base.base.id, intel_plane->base.name,
			      fb->base.id, fb->format->format);
		return -EINVAL;
	}

	return 0;
}
5232
5233 static void skylake_scaler_disable(struct intel_crtc *crtc)
5234 {
5235         int i;
5236
5237         for (i = 0; i < crtc->num_scalers; i++)
5238                 skl_detach_scaler(crtc, i);
5239 }
5240
/*
 * Program the panel fitter on SKL+, which is implemented as one of the
 * pipe scalers.  No-op when pch_pfit is not enabled in @crtc_state.
 */
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	if (crtc_state->pch_pfit.enabled) {
		u16 uv_rgb_hphase, uv_rgb_vphase;
		int pfit_w, pfit_h, hscale, vscale;
		int id;

		/* A scaler must have been assigned for the pfit. */
		if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
			return;

		/* pch_pfit.size packs width in the high 16 bits, height
		 * in the low 16 bits. */
		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
		pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

		/* .16 fixed-point scale factors for the phase calc. */
		hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
		vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

		/* RGB output: no chroma subsampling (sub=1, not co-sited). */
		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
		I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
	}
}
5277
/*
 * Program the ILK-style panel fitter (PF) registers.  No-op when
 * pch_pfit is not enabled in @crtc_state.
 */
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int pipe = crtc->pipe;

	if (crtc_state->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
			/* IVB/HSW additionally select the pipe in PF_CTL. */
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
	}
}
5298
/*
 * Enable IPS if @crtc_state asks for it.  On BDW this goes through the
 * pcode mailbox; on HSW the IPS_CTL register is written directly and we
 * wait for the enable bit to latch on the next vblank.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
						IPS_ENABLE | IPS_PCODE_CONTROL));
		mutex_unlock(&dev_priv->pcu_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
					    50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
5338
/*
 * Disable IPS if it was enabled by @crtc_state.  Counterpart of
 * hsw_enable_ips(); ends with a vblank wait so the caller may safely
 * disable planes afterwards.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->pcu_lock);
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, 0,
					    100))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		/* HSW: direct register write, no mailbox handshake. */
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5369
5370 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5371 {
5372         if (intel_crtc->overlay) {
5373                 struct drm_device *dev = intel_crtc->base.dev;
5374
5375                 mutex_lock(&dev->struct_mutex);
5376                 (void) intel_overlay_switch_off(intel_crtc->overlay);
5377                 mutex_unlock(&dev->struct_mutex);
5378         }
5379
5380         /* Let userspace switch the overlay on again. In most cases userspace
5381          * has to recompute where to put it anyway.
5382          */
5383 }
5384
5385 /**
5386  * intel_post_enable_primary - Perform operations after enabling primary plane
5387  * @crtc: the CRTC whose primary plane was just enabled
5388  * @new_crtc_state: the enabling state
5389  *
5390  * Performs potentially sleeping operations that must be done after the primary
5391  * plane is enabled, such as updating FBC and IPS.  Note that this may be
5392  * called due to an explicit primary plane update, or due to an implicit
5393  * re-enable that is caused when a sprite plane is updated to no longer
5394  * completely hide the primary plane.
5395  */
5396 static void
5397 intel_post_enable_primary(struct drm_crtc *crtc,
5398                           const struct intel_crtc_state *new_crtc_state)
5399 {
5400         struct drm_device *dev = crtc->dev;
5401         struct drm_i915_private *dev_priv = to_i915(dev);
5402         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5403         int pipe = intel_crtc->pipe;
5404
5405         /*
5406          * Gen2 reports pipe underruns whenever all planes are disabled.
5407          * So don't enable underrun reporting before at least some planes
5408          * are enabled.
5409          * FIXME: Need to fix the logic to work when we turn off all planes
5410          * but leave the pipe running.
5411          */
5412         if (IS_GEN(dev_priv, 2))
5413                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5414
5415         /* Underruns don't always raise interrupts, so check manually. */
5416         intel_check_cpu_fifo_underruns(dev_priv);
5417         intel_check_pch_fifo_underruns(dev_priv);
5418 }
5419
5420 /* FIXME get rid of this and use pre_plane_update */
5421 static void
5422 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
5423 {
5424         struct drm_device *dev = crtc->dev;
5425         struct drm_i915_private *dev_priv = to_i915(dev);
5426         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5427         int pipe = intel_crtc->pipe;
5428
5429         /*
5430          * Gen2 reports pipe underruns whenever all planes are disabled.
5431          * So disable underrun reporting before all the planes get disabled.
5432          */
5433         if (IS_GEN(dev_priv, 2))
5434                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5435
5436         hsw_disable_ips(to_intel_crtc_state(crtc->state));
5437
5438         /*
5439          * Vblank time updates from the shadow to live plane control register
5440          * are blocked if the memory self-refresh mode is active at that
5441          * moment. So to make sure the plane gets truly disabled, disable
5442          * first the self-refresh mode. The self-refresh enable bit in turn
5443          * will be checked/applied by the HW only at the next frame start
5444          * event which is after the vblank start event, so we need to have a
5445          * wait-for-vblank between disabling the plane and the pipe.
5446          */
5447         if (HAS_GMCH(dev_priv) &&
5448             intel_set_memory_cxsr(dev_priv, false))
5449                 intel_wait_for_vblank(dev_priv, pipe);
5450 }
5451
5452 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5453                                        const struct intel_crtc_state *new_crtc_state)
5454 {
5455         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5456         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5457
5458         if (!old_crtc_state->ips_enabled)
5459                 return false;
5460
5461         if (needs_modeset(&new_crtc_state->base))
5462                 return true;
5463
5464         /*
5465          * Workaround : Do not read or write the pipe palette/gamma data while
5466          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5467          *
5468          * Disable IPS before we program the LUT.
5469          */
5470         if (IS_HASWELL(dev_priv) &&
5471             (new_crtc_state->base.color_mgmt_changed ||
5472              new_crtc_state->update_pipe) &&
5473             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5474                 return true;
5475
5476         return !new_crtc_state->ips_enabled;
5477 }
5478
5479 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5480                                        const struct intel_crtc_state *new_crtc_state)
5481 {
5482         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5483         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5484
5485         if (!new_crtc_state->ips_enabled)
5486                 return false;
5487
5488         if (needs_modeset(&new_crtc_state->base))
5489                 return true;
5490
5491         /*
5492          * Workaround : Do not read or write the pipe palette/gamma data while
5493          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5494          *
5495          * Re-enable IPS after the LUT has been programmed.
5496          */
5497         if (IS_HASWELL(dev_priv) &&
5498             (new_crtc_state->base.color_mgmt_changed ||
5499              new_crtc_state->update_pipe) &&
5500             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5501                 return true;
5502
5503         /*
5504          * We can't read out IPS on broadwell, assume the worst and
5505          * forcibly enable IPS on the first fastset.
5506          */
5507         if (new_crtc_state->update_pipe &&
5508             old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5509                 return true;
5510
5511         return !old_crtc_state->ips_enabled;
5512 }
5513
5514 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5515                           const struct intel_crtc_state *crtc_state)
5516 {
5517         if (!crtc_state->nv12_planes)
5518                 return false;
5519
5520         /* WA Display #0827: Gen9:all */
5521         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
5522                 return true;
5523
5524         return false;
5525 }
5526
/*
 * Post-commit work for one CRTC: flush frontbuffer tracking, program
 * post-vblank watermarks, re-enable IPS/FBC, run the primary-plane
 * re-enable work and undo Display WA #0827 when no longer needed.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	/* The new (just committed) CRTC state from the same atomic state. */
	struct intel_crtc_state *pipe_config =
		intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
						crtc);
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_primary_state =
		drm_atomic_get_old_plane_state(old_state, primary);

	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

	/* Program the final watermarks now that the update has landed. */
	if (pipe_config->update_wm_post && pipe_config->base.active)
		intel_update_watermarks(crtc);

	/* IPS may only be re-enabled after the commit (see hsw_post_update_enable_ips()). */
	if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
		hsw_enable_ips(pipe_config);

	if (old_primary_state) {
		struct drm_plane_state *new_primary_state =
			drm_atomic_get_new_plane_state(old_state, primary);

		intel_fbc_post_update(crtc);

		/* Primary went from hidden to visible (or a modeset enabled it). */
		if (new_primary_state->visible &&
		    (needs_modeset(&pipe_config->base) ||
		     !old_primary_state->visible))
			intel_post_enable_primary(&crtc->base, pipe_config);
	}

	/* Display WA 827: leave the workaround once NV12 planes are gone. */
	if (needs_nv12_wa(dev_priv, old_crtc_state) &&
	    !needs_nv12_wa(dev_priv, pipe_config)) {
		skl_wa_clkgate(dev_priv, crtc->pipe, false);
	}
}
5566
/*
 * Pre-commit work for one CRTC: disable IPS where required, prepare
 * FBC, suppress gen2 underrun reporting, enter Display WA #0827,
 * disable self-refresh/LP watermarks that would block the update, and
 * program the pre-vblank ("intermediate") watermarks.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
				   struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_primary_state =
		drm_atomic_get_old_plane_state(old_state, primary);
	bool modeset = needs_modeset(&pipe_config->base);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	/* IPS must be off before planes/LUT are touched (see hsw_pre_update_disable_ips()). */
	if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
		hsw_disable_ips(old_crtc_state);

	if (old_primary_state) {
		struct intel_plane_state *new_primary_state =
			intel_atomic_get_new_plane_state(old_intel_state,
							 to_intel_plane(primary));

		intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So disable underrun reporting before all the planes get disabled.
		 */
		if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
		    (modeset || !new_primary_state->base.visible))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	}

	/* Display WA 827: enter the workaround when NV12 planes appear. */
	if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
	    needs_nv12_wa(dev_priv, pipe_config)) {
		skl_wa_clkgate(dev_priv, crtc->pipe, true);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
	    pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
	    old_crtc_state->base.active)
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * If we're doing a modeset, we're done.  No need to do any pre-vblank
	 * watermark programming here.
	 */
	if (needs_modeset(&pipe_config->base))
		return;

	/*
	 * For platforms that support atomic watermarks, program the
	 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
	 * will be the intermediate values that are safe for both pre- and
	 * post- vblank; when vblank happens, the 'active' values will be set
	 * to the final 'target' values and we'll do this again to get the
	 * optimal watermarks.  For gen9+ platforms, the values we program here
	 * will be the final target values which will get automatically latched
	 * at vblank time; no further programming will be necessary.
	 *
	 * If a platform hasn't been transitioned to atomic watermarks yet,
	 * we'll continue to update watermarks the old way, if flags tell
	 * us to.
	 */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     pipe_config);
	else if (pipe_config->update_wm_pre)
		intel_update_watermarks(crtc);
}
5656
/*
 * Disable every plane of @crtc selected by the new state's
 * update_planes mask, then flush frontbuffer tracking for the planes
 * that were visible.  The legacy overlay is switched off first.
 */
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		/* Skip planes on other pipes or not selected for update. */
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		plane->disable_plane(plane, new_crtc_state);

		/* Only previously-visible planes need a frontbuffer flip. */
		if (old_plane_state->base.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}
5684
5685 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
5686                                           struct intel_crtc_state *crtc_state,
5687                                           struct drm_atomic_state *old_state)
5688 {
5689         struct drm_connector_state *conn_state;
5690         struct drm_connector *conn;
5691         int i;
5692
5693         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5694                 struct intel_encoder *encoder =
5695                         to_intel_encoder(conn_state->best_encoder);
5696
5697                 if (conn_state->crtc != crtc)
5698                         continue;
5699
5700                 if (encoder->pre_pll_enable)
5701                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
5702         }
5703 }
5704
5705 static void intel_encoders_pre_enable(struct drm_crtc *crtc,
5706                                       struct intel_crtc_state *crtc_state,
5707                                       struct drm_atomic_state *old_state)
5708 {
5709         struct drm_connector_state *conn_state;
5710         struct drm_connector *conn;
5711         int i;
5712
5713         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5714                 struct intel_encoder *encoder =
5715                         to_intel_encoder(conn_state->best_encoder);
5716
5717                 if (conn_state->crtc != crtc)
5718                         continue;
5719
5720                 if (encoder->pre_enable)
5721                         encoder->pre_enable(encoder, crtc_state, conn_state);
5722         }
5723 }
5724
5725 static void intel_encoders_enable(struct drm_crtc *crtc,
5726                                   struct intel_crtc_state *crtc_state,
5727                                   struct drm_atomic_state *old_state)
5728 {
5729         struct drm_connector_state *conn_state;
5730         struct drm_connector *conn;
5731         int i;
5732
5733         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5734                 struct intel_encoder *encoder =
5735                         to_intel_encoder(conn_state->best_encoder);
5736
5737                 if (conn_state->crtc != crtc)
5738                         continue;
5739
5740                 if (encoder->enable)
5741                         encoder->enable(encoder, crtc_state, conn_state);
5742                 intel_opregion_notify_encoder(encoder, true);
5743         }
5744 }
5745
5746 static void intel_encoders_disable(struct drm_crtc *crtc,
5747                                    struct intel_crtc_state *old_crtc_state,
5748                                    struct drm_atomic_state *old_state)
5749 {
5750         struct drm_connector_state *old_conn_state;
5751         struct drm_connector *conn;
5752         int i;
5753
5754         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5755                 struct intel_encoder *encoder =
5756                         to_intel_encoder(old_conn_state->best_encoder);
5757
5758                 if (old_conn_state->crtc != crtc)
5759                         continue;
5760
5761                 intel_opregion_notify_encoder(encoder, false);
5762                 if (encoder->disable)
5763                         encoder->disable(encoder, old_crtc_state, old_conn_state);
5764         }
5765 }
5766
5767 static void intel_encoders_post_disable(struct drm_crtc *crtc,
5768                                         struct intel_crtc_state *old_crtc_state,
5769                                         struct drm_atomic_state *old_state)
5770 {
5771         struct drm_connector_state *old_conn_state;
5772         struct drm_connector *conn;
5773         int i;
5774
5775         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5776                 struct intel_encoder *encoder =
5777                         to_intel_encoder(old_conn_state->best_encoder);
5778
5779                 if (old_conn_state->crtc != crtc)
5780                         continue;
5781
5782                 if (encoder->post_disable)
5783                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
5784         }
5785 }
5786
5787 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
5788                                             struct intel_crtc_state *old_crtc_state,
5789                                             struct drm_atomic_state *old_state)
5790 {
5791         struct drm_connector_state *old_conn_state;
5792         struct drm_connector *conn;
5793         int i;
5794
5795         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5796                 struct intel_encoder *encoder =
5797                         to_intel_encoder(old_conn_state->best_encoder);
5798
5799                 if (old_conn_state->crtc != crtc)
5800                         continue;
5801
5802                 if (encoder->post_pll_disable)
5803                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
5804         }
5805 }
5806
5807 static void intel_encoders_update_pipe(struct drm_crtc *crtc,
5808                                        struct intel_crtc_state *crtc_state,
5809                                        struct drm_atomic_state *old_state)
5810 {
5811         struct drm_connector_state *conn_state;
5812         struct drm_connector *conn;
5813         int i;
5814
5815         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5816                 struct intel_encoder *encoder =
5817                         to_intel_encoder(conn_state->best_encoder);
5818
5819                 if (conn_state->crtc != crtc)
5820                         continue;
5821
5822                 if (encoder->update_pipe)
5823                         encoder->update_pipe(encoder, crtc_state, conn_state);
5824         }
5825 }
5826
/*
 * Modeset enable sequence for ILK-style (PCH display) CRTCs: program
 * timings/M-N/pipeconf, enable the FDI PLL and panel fitter, load the
 * LUT, enable the pipe and (optionally) the PCH side, then enable the
 * encoders.  Underrun reporting is suppressed across the sequence and
 * restored at the end because FDI training and PCH enabling raise
 * spurious underruns.  The statement order here is the hardware
 * contract; do not reorder.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	/* Enabling an already-active CRTC would be a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (pipe_config->has_pch_encoder)
		intel_prepare_shared_dpll(pipe_config);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	/* FDI link M/N values are needed only when driving the PCH. */
	if (pipe_config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(pipe_config,
					     &pipe_config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (pipe_config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(pipe_config);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(pipe_config);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
	intel_enable_pipe(pipe_config);

	if (pipe_config->has_pch_encoder)
		ironlake_pch_enable(old_intel_state, pipe_config);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (pipe_config->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5921
5922 /* IPS only exists on ULT machines and is tied to pipe A. */
5923 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
5924 {
5925         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
5926 }
5927
5928 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
5929                                             enum pipe pipe, bool apply)
5930 {
5931         u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
5932         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
5933
5934         if (apply)
5935                 val |= mask;
5936         else
5937                 val &= ~mask;
5938
5939         I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
5940 }
5941
5942 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5943 {
5944         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5945         enum pipe pipe = crtc->pipe;
5946         u32 val;
5947
5948         val = MBUS_DBOX_A_CREDIT(2);
5949         val |= MBUS_DBOX_BW_CREDIT(1);
5950         val |= MBUS_DBOX_B_CREDIT(8);
5951
5952         I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5953 }
5954
/*
 * Modeset enable sequence for HSW+ (DDI) CRTCs: enable the shared
 * DPLL and encoder pre-enable hooks, program transcoder timings,
 * pipeconf/pipemisc, panel fitter and LUT, then enable the transcoder
 * function, the pipe, the PCH (LPT) side where present, and finally
 * the encoders.  Includes Display WA #1180 and the HSW dual-pipe
 * workaround.  The statement order here is the hardware contract; do
 * not reorder.
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
				struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	bool psl_clkgate_wa;

	/* Enabling an already-active CRTC would be a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (pipe_config->shared_dpll)
		intel_enable_shared_dpll(pipe_config);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	/* DSI transcoders program their own timings. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(pipe_config);

	intel_set_pipe_src_size(pipe_config);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   pipe_config->pixel_multiplier - 1);
	}

	/* FDI link M/N values are needed only when driving the PCH. */
	if (pipe_config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(pipe_config,
					     &pipe_config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(pipe_config);

	haswell_set_pipemisc(pipe_config);

	intel_crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
			 pipe_config->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_pfit_enable(pipe_config);
	else
		ironlake_pfit_enable(pipe_config);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(intel_crtc);

	intel_ddi_set_pipe_settings(pipe_config);
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(pipe_config);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(intel_crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(pipe_config);

	if (pipe_config->has_pch_encoder)
		lpt_pch_enable(old_intel_state, pipe_config);

	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(pipe_config, true);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(crtc, pipe_config, old_state);

	/* WA #1180 exit: wait a frame, then re-enable clock gating. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
6062
6063 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6064 {
6065         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6066         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6067         enum pipe pipe = crtc->pipe;
6068
6069         /* To avoid upsetting the power well on haswell only disable the pfit if
6070          * it's in use. The hw state code will make sure we get this right. */
6071         if (old_crtc_state->pch_pfit.enabled) {
6072                 I915_WRITE(PF_CTL(pipe), 0);
6073                 I915_WRITE(PF_WIN_POS(pipe), 0);
6074                 I915_WRITE(PF_WIN_SZ(pipe), 0);
6075         }
6076 }
6077
/*
 * Modeset disable sequence for ILK-style CRTCs: disable encoders,
 * vblanks, the pipe, the panel fitter, FDI and the PCH transcoder plus
 * its DP/DPLL routing, with underrun reporting suppressed around the
 * FDI teardown and restored at the end.  The statement order here is
 * the hardware contract; do not reorder.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
				  struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	ironlake_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (old_crtc_state->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		/* On CPT the port/DPLL routing also needs to be torn down. */
		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6136
/*
 * Disable sequence for HSW+ (DDI) CRTCs: encoders, pipe, MST payload,
 * transcoder, DSC, scaler/pfit, then the post-disable hooks.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(old_crtc_state);

	/* MST payload allocation is torn down before the transcoder. */
	if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(old_crtc_state, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(old_crtc_state);

	intel_dsc_disable(old_crtc_state);

	/* Gen9+ scales via the pipe scaler, older DDI via the ILK pfit. */
	if (INTEL_GEN(dev_priv) >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
}
6171
/* Program and enable the GMCH panel fitter described by @crtc_state. */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Nothing to do when the state doesn't use the panel fitter. */
	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* Ratios are written first, then control (which carries the enable). */
	I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
6194
6195 bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
6196 {
6197         if (port == PORT_NONE)
6198                 return false;
6199
6200         if (IS_ICELAKE(dev_priv))
6201                 return port <= PORT_B;
6202
6203         return false;
6204 }
6205
6206 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
6207 {
6208         if (IS_ICELAKE(dev_priv))
6209                 return port >= PORT_C && port <= PORT_F;
6210
6211         return false;
6212 }
6213
6214 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6215 {
6216         if (!intel_port_is_tc(dev_priv, port))
6217                 return PORT_TC_NONE;
6218
6219         return port - PORT_C;
6220 }
6221
/*
 * Map a DDI port to the power domain covering its lanes. Unknown
 * ports are flagged via MISSING_CASE and mapped to PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
6242
/*
 * Map a digital port's AUX channel to its power domain. Unknown
 * channels are flagged via MISSING_CASE and fall back to AUX_A.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	switch (dig_port->aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	default:
		MISSING_CASE(dig_port->aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}
6264
6265 static u64 get_crtc_power_domains(struct drm_crtc *crtc,
6266                                   struct intel_crtc_state *crtc_state)
6267 {
6268         struct drm_device *dev = crtc->dev;
6269         struct drm_i915_private *dev_priv = to_i915(dev);
6270         struct drm_encoder *encoder;
6271         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6272         enum pipe pipe = intel_crtc->pipe;
6273         u64 mask;
6274         enum transcoder transcoder = crtc_state->cpu_transcoder;
6275
6276         if (!crtc_state->base.active)
6277                 return 0;
6278
6279         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6280         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
6281         if (crtc_state->pch_pfit.enabled ||
6282             crtc_state->pch_pfit.force_thru)
6283                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6284
6285         drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
6286                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6287
6288                 mask |= BIT_ULL(intel_encoder->power_domain);
6289         }
6290
6291         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
6292                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
6293
6294         if (crtc_state->shared_dpll)
6295                 mask |= BIT_ULL(POWER_DOMAIN_PLLS);
6296
6297         return mask;
6298 }
6299
6300 static u64
6301 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
6302                                struct intel_crtc_state *crtc_state)
6303 {
6304         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6305         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6306         enum intel_display_power_domain domain;
6307         u64 domains, new_domains, old_domains;
6308
6309         old_domains = intel_crtc->enabled_power_domains;
6310         intel_crtc->enabled_power_domains = new_domains =
6311                 get_crtc_power_domains(crtc, crtc_state);
6312
6313         domains = new_domains & ~old_domains;
6314
6315         for_each_power_domain(domain, domains)
6316                 intel_display_power_get(dev_priv, domain);
6317
6318         return old_domains & ~new_domains;
6319 }
6320
6321 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
6322                                       u64 domains)
6323 {
6324         enum intel_display_power_domain domain;
6325
6326         for_each_power_domain(domain, domains)
6327                 intel_display_power_put_unchecked(dev_priv, domain);
6328 }
6329
/*
 * Enable sequence for VLV/CHV CRTCs: timings, pipeconf, PLL, pfit,
 * LUTs, watermarks, pipe, then encoders — in that strict order.
 */
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
                                   struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/* Enabling an already-active crtc indicates a state tracking bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	/* CHV and VLV use different PLL programming sequences. */
	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(intel_crtc, pipe_config);
		chv_enable_pll(intel_crtc, pipe_config);
	} else {
		vlv_prepare_pll(intel_crtc, pipe_config);
		vlv_enable_pll(intel_crtc, pipe_config);
	}

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);

	/* Watermarks must be valid before the pipe starts fetching data. */
	dev_priv->display.initial_watermarks(old_intel_state,
					     pipe_config);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
6387
/* Write the precomputed FP0/FP1 PLL divider values for the crtc's pipe. */
static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
}
6396
/*
 * Enable sequence for gen2-4 (pre-VLV) CRTCs: PLL dividers, timings,
 * pipeconf, PLL, pfit, LUTs, watermarks, pipe, then encoders.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
                             struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/* Enabling an already-active crtc indicates a state tracking bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(pipe_config);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	/* Gen2 has no FIFO underrun reporting. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_enable_pll(intel_crtc, pipe_config);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(pipe_config);
	intel_color_commit(pipe_config);

	/* Watermarks must be valid before the pipe starts fetching data. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     pipe_config);
	else
		intel_update_watermarks(intel_crtc);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	intel_crtc_vblank_on(pipe_config);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
6447
/* Turn off the GMCH panel fitter if the old state had it enabled. */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	/* The pfit may only be touched while the pipe is disabled. */
	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
		      I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}
6462
/*
 * Disable sequence for gen2-4 and VLV/CHV CRTCs: encoders, pipe,
 * pfit, post-disable hooks, then the (non-DSI) PLL.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
                              struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	/* DSI PLLs are handled by the encoder hooks, not here. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

	/* Gen2 has no FIFO underrun reporting. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(intel_crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
6511
/*
 * Force a crtc off outside of a normal atomic commit — used to sanitize
 * inconsistent hw state. Disables planes and the crtc, clears the sw
 * state to match, and drops the power domain references the crtc held.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
                                        struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	/* Turn off every plane that is still visible on this crtc. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.visible)
			intel_plane_disable_noatomic(intel_crtc, plane);
	}

	/* Build a minimal atomic state to drive the crtc_disable hook. */
	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(crtc_state) || ret);

	dev_priv->display.crtc_disable(crtc_state, state);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	/* Clear the sw state to match the now-disabled hardware. */
	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));

	/* Release the power domain references this crtc was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
}
6581
6582 /*
6583  * turn all crtc's off, but do not adjust state
6584  * This has to be paired with a call to intel_modeset_setup_hw_state.
6585  */
6586 int intel_display_suspend(struct drm_device *dev)
6587 {
6588         struct drm_i915_private *dev_priv = to_i915(dev);
6589         struct drm_atomic_state *state;
6590         int ret;
6591
6592         state = drm_atomic_helper_suspend(dev);
6593         ret = PTR_ERR_OR_ZERO(state);
6594         if (ret)
6595                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6596         else
6597                 dev_priv->modeset_restore_state = state;
6598         return ret;
6599 }
6600
/* Default encoder destroy hook: clean up the base, then free the wrapper. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
6608
6609 /* Cross check the actual hw state with our own modeset state tracking (and it's
6610  * internal consistency). */
/*
 * Verify that a connector's sw state agrees with the hw state read
 * back via ->get_hw_state(). Only warns (I915_STATE_WARN); never
 * fixes anything up.
 */
static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
                                         struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;

		/* An enabled connector must be attached to a crtc. */
		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->active,
		      "connector is active, but attached crtc isn't\n");

		/* The encoder checks below don't apply to MST. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
6647
6648 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6649 {
6650         if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6651                 return crtc_state->fdi_lanes;
6652
6653         return 0;
6654 }
6655
/*
 * Validate the FDI lane count requested for @pipe, including the
 * cross-pipe lane sharing constraints on three-pipe IVB.
 * Returns 0 on success or a negative errno.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
                                     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute maximum on any platform handled here. */
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Two-pipe platforms have no cross-pipe sharing to check. */
	if (INTEL_INFO(dev_priv)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* Pipe B using >2 lanes requires pipe C's FDI to be unused. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* Pipe C is only usable while pipe B uses at most 2 lanes. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
6727
6728 #define RETRY 1
/*
 * Compute the FDI lane count and link M/N values for @pipe_config,
 * lowering pipe bpp and retrying while the lane config is invalid.
 * Returns 0 on success, RETRY if the caller must recompute the whole
 * config with the reduced bpp, or a negative errno.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
                                       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	/* -EDEADLK must be propagated for the atomic backoff dance. */
	if (ret == -EDEADLK)
		return ret;

	/* Retry with a lower bpp as long as we stay above 6 bpc (6*3 bpp). */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
6776
6777 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
6778 {
6779         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6780         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6781
6782         /* IPS only exists on ULT machines and is tied to pipe A. */
6783         if (!hsw_crtc_supports_ips(crtc))
6784                 return false;
6785
6786         if (!i915_modparams.enable_ips)
6787                 return false;
6788
6789         if (crtc_state->pipe_bpp > 24)
6790                 return false;
6791
6792         /*
6793          * We compare against max which means we must take
6794          * the increased cdclk requirement into account when
6795          * calculating the new cdclk.
6796          *
6797          * Should measure whether using a lower cdclk w/o IPS
6798          */
6799         if (IS_BROADWELL(dev_priv) &&
6800             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
6801                 return false;
6802
6803         return true;
6804 }
6805
6806 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
6807 {
6808         struct drm_i915_private *dev_priv =
6809                 to_i915(crtc_state->base.crtc->dev);
6810         struct intel_atomic_state *intel_state =
6811                 to_intel_atomic_state(crtc_state->base.state);
6812
6813         if (!hsw_crtc_state_ips_capable(crtc_state))
6814                 return false;
6815
6816         if (crtc_state->ips_force_disable)
6817                 return false;
6818
6819         /* IPS should be fine as long as at least one plane is enabled. */
6820         if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
6821                 return false;
6822
6823         /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
6824         if (IS_BROADWELL(dev_priv) &&
6825             crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
6826                 return false;
6827
6828         return true;
6829 }
6830
6831 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6832 {
6833         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6834
6835         /* GDG double wide on either pipe, otherwise pipe A only */
6836         return INTEL_GEN(dev_priv) < 4 &&
6837                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6838 }
6839
6840 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
6841 {
6842         u32 pixel_rate;
6843
6844         pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
6845
6846         /*
6847          * We only use IF-ID interlacing. If we ever use
6848          * PF-ID we'll need to adjust the pixel_rate here.
6849          */
6850
6851         if (pipe_config->pch_pfit.enabled) {
6852                 u64 pipe_w, pipe_h, pfit_w, pfit_h;
6853                 u32 pfit_size = pipe_config->pch_pfit.size;
6854
6855                 pipe_w = pipe_config->pipe_src_w;
6856                 pipe_h = pipe_config->pipe_src_h;
6857
6858                 pfit_w = (pfit_size >> 16) & 0xFFFF;
6859                 pfit_h = pfit_size & 0xFFFF;
6860                 if (pipe_w < pfit_w)
6861                         pipe_w = pfit_w;
6862                 if (pipe_h < pfit_h)
6863                         pipe_h = pfit_h;
6864
6865                 if (WARN_ON(!pfit_w || !pfit_h))
6866                         return pixel_rate;
6867
6868                 pixel_rate = div_u64((u64)pixel_rate * pipe_w * pipe_h,
6869                                      pfit_w * pfit_h);
6870         }
6871
6872         return pixel_rate;
6873 }
6874
6875 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
6876 {
6877         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
6878
6879         if (HAS_GMCH(dev_priv))
6880                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
6881                 crtc_state->pixel_rate =
6882                         crtc_state->base.adjusted_mode.crtc_clock;
6883         else
6884                 crtc_state->pixel_rate =
6885                         ilk_pipe_pixel_rate(crtc_state);
6886 }
6887
/*
 * Validate and finalize basic pipe parameters for the requested mode:
 * dotclock limits (including double wide mode on gen2/3), YCbCr output
 * vs. pipe CSC conflicts, pipe source width restrictions, and the hsync
 * front porch workaround. Also computes the pipe pixel rate and, for
 * PCH encoders, the FDI configuration.
 *
 * Returns 0 on success, a negative errno if the mode can't be supported.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* Gen2/3 single wide pipes are limited to 90% of cdclk. */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->base.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	/* PCH encoders additionally need a valid FDI link configuration. */
	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
6962
6963 static void
6964 intel_reduce_m_n_ratio(u32 *num, u32 *den)
6965 {
6966         while (*num > DATA_LINK_M_N_MASK ||
6967                *den > DATA_LINK_M_N_MASK) {
6968                 *num >>= 1;
6969                 *den >>= 1;
6970         }
6971 }
6972
6973 static void compute_m_n(unsigned int m, unsigned int n,
6974                         u32 *ret_m, u32 *ret_n,
6975                         bool constant_n)
6976 {
6977         /*
6978          * Several DP dongles in particular seem to be fussy about
6979          * too large link M/N values. Give N value as 0x8000 that
6980          * should be acceptable by specific devices. 0x8000 is the
6981          * specified fixed N value for asynchronous clock mode,
6982          * which the devices expect also in synchronous clock mode.
6983          */
6984         if (constant_n)
6985                 *ret_n = 0x8000;
6986         else
6987                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
6988
6989         *ret_m = div_u64((u64)m * *ret_n, n);
6990         intel_reduce_m_n_ratio(ret_m, ret_n);
6991 }
6992
6993 void
6994 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
6995                        int pixel_clock, int link_clock,
6996                        struct intel_link_m_n *m_n,
6997                        bool constant_n)
6998 {
6999         m_n->tu = 64;
7000
7001         compute_m_n(bits_per_pixel * pixel_clock,
7002                     link_clock * nlanes * 8,
7003                     &m_n->gmch_m, &m_n->gmch_n,
7004                     constant_n);
7005
7006         compute_m_n(pixel_clock, link_clock,
7007                     &m_n->link_m, &m_n->link_n,
7008                     constant_n);
7009 }
7010
7011 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7012 {
7013         if (i915_modparams.panel_use_ssc >= 0)
7014                 return i915_modparams.panel_use_ssc != 0;
7015         return dev_priv->vbt.lvds_use_ssc
7016                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7017 }
7018
7019 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7020 {
7021         return (1 << dpll->n) << 16 | dpll->m2;
7022 }
7023
7024 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7025 {
7026         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7027 }
7028
7029 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7030                                      struct intel_crtc_state *crtc_state,
7031                                      struct dpll *reduced_clock)
7032 {
7033         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7034         u32 fp, fp2 = 0;
7035
7036         if (IS_PINEVIEW(dev_priv)) {
7037                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7038                 if (reduced_clock)
7039                         fp2 = pnv_dpll_compute_fp(reduced_clock);
7040         } else {
7041                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7042                 if (reduced_clock)
7043                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
7044         }
7045
7046         crtc_state->dpll_hw_state.fp0 = fp;
7047
7048         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7049             reduced_clock) {
7050                 crtc_state->dpll_hw_state.fp1 = fp2;
7051         } else {
7052                 crtc_state->dpll_hw_state.fp1 = fp;
7053         }
7054 }
7055
/*
 * Workaround sequence for the PLL B opamp on VLV, run before enabling
 * PLL B. The DPIO read-modify-write order below matters; values come
 * from the eDP/HDMI DPIO vbios notes (see vlv_prepare_pll caller).
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the forced value again once calibration is set up. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7084
/*
 * Program the PCH transcoder data/link M1/N1 registers for the crtc's
 * pipe from the given M/N values. The TU size is packed into DATA_M1.
 */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
7097
7098 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7099                                  enum transcoder transcoder)
7100 {
7101         if (IS_HASWELL(dev_priv))
7102                 return transcoder == TRANSCODER_EDP;
7103
7104         /*
7105          * Strictly speaking some registers are available before
7106          * gen7, but we only support DRRS on gen7+
7107          */
7108         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7109 }
7110
/*
 * Program the CPU transcoder data/link M/N registers. On gen5+ the
 * registers are indexed by transcoder; pre-gen5 (G4X-style) registers
 * are indexed by pipe. The M2/N2 set is written only when @m2_n2 is
 * provided, DRRS is enabled and the transcoder actually has those
 * registers (see transcoder_has_m2_n2()).
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
7144
/*
 * Program the DP M/N values selected by @m_n. When M2_N2 is requested
 * on hardware without a second register set, the m2_n2 divider values
 * are programmed into the M1_N1 registers instead.
 *
 * NOTE(review): the PCH encoder path always programs dp_m_n regardless
 * of @m_n — presumably intentional since PCH transcoders have no M2/N2
 * set, but worth confirming against the DRRS callers.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
7169
7170 static void vlv_compute_dpll(struct intel_crtc *crtc,
7171                              struct intel_crtc_state *pipe_config)
7172 {
7173         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7174                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7175         if (crtc->pipe != PIPE_A)
7176                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7177
7178         /* DPLL not used with DSI, but still need the rest set up */
7179         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7180                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7181                         DPLL_EXT_BUFFER_ENABLE_VLV;
7182
7183         pipe_config->dpll_hw_state.dpll_md =
7184                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7185 }
7186
7187 static void chv_compute_dpll(struct intel_crtc *crtc,
7188                              struct intel_crtc_state *pipe_config)
7189 {
7190         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7191                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7192         if (crtc->pipe != PIPE_A)
7193                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7194
7195         /* DPLL not used with DSI, but still need the rest set up */
7196         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7197                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7198
7199         pipe_config->dpll_hw_state.dpll_md =
7200                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7201 }
7202
/*
 * Program the VLV DPLL dividers and analog tuning values over DPIO,
 * prior to enabling the PLL. The write order follows the eDP/HDMI
 * DPIO vbios notes; do not reorder. Skipped entirely (beyond the
 * refclk enable) when the DPLL isn't actually used (DSI).
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll &
		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* DPIO accesses are serialized through the sideband lock. */
	mutex_lock(&dev_priv->sb_lock);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Write the dividers first, then enable calibration on top. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock tuning; extra bit set for DP encoders (magic values
	 * from the vbios notes — meaning not documented here). */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->sb_lock);
}
7301
/*
 * Program the CHV DPLL dividers, loop filter and lock-detect settings
 * over DPIO, prior to enabling the PLL. Register write order matters.
 * Skipped (beyond the refclk/SSC enable) when the DPLL isn't actually
 * used (DSI).
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* m2 carries a 22-bit fractional part in its low bits. */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	/* DPIO accesses are serialized through the sideband lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter coefficients and tribuf count depend on the VCO range. */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
7406
7407 /**
7408  * vlv_force_pll_on - forcibly enable just the PLL
7409  * @dev_priv: i915 private structure
7410  * @pipe: pipe PLL to enable
7411  * @dpll: PLL configuration
7412  *
7413  * Enable the PLL for @pipe using the supplied @dpll config. To be used
7414  * in cases where we need the PLL enabled even when @pipe is not going to
7415  * be enabled.
7416  */
7417 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7418                      const struct dpll *dpll)
7419 {
7420         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7421         struct intel_crtc_state *pipe_config;
7422
7423         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7424         if (!pipe_config)
7425                 return -ENOMEM;
7426
7427         pipe_config->base.crtc = &crtc->base;
7428         pipe_config->pixel_multiplier = 1;
7429         pipe_config->dpll = *dpll;
7430
7431         if (IS_CHERRYVIEW(dev_priv)) {
7432                 chv_compute_dpll(crtc, pipe_config);
7433                 chv_prepare_pll(crtc, pipe_config);
7434                 chv_enable_pll(crtc, pipe_config);
7435         } else {
7436                 vlv_compute_dpll(crtc, pipe_config);
7437                 vlv_prepare_pll(crtc, pipe_config);
7438                 vlv_enable_pll(crtc, pipe_config);
7439         }
7440
7441         kfree(pipe_config);
7442
7443         return 0;
7444 }
7445
7446 /**
7447  * vlv_force_pll_off - forcibly disable just the PLL
7448  * @dev_priv: i915 private structure
7449  * @pipe: pipe PLL to disable
7450  *
7451  * Disable the PLL for @pipe. To be used in cases where we need
7452  * the PLL enabled even when @pipe is not going to be enabled.
7453  */
7454 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
7455 {
7456         if (IS_CHERRYVIEW(dev_priv))
7457                 chv_disable_pll(dev_priv, pipe);
7458         else
7459                 vlv_disable_pll(dev_priv, pipe);
7460 }
7461
/*
 * Compute the gen3/4 (i9xx) DPLL control register value (and DPLL_MD on
 * gen4+) into the crtc's dpll hw state, and program the FP dividers via
 * i9xx_update_pll_dividers().
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* SDVO pixel multiplier lives in the DPLL on these platforms. */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, SSC for LVDS, or DREFCLK. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
7534
/*
 * Compute the gen2 (i8xx) DPLL control register value into the crtc's
 * dpll hw state, and program the FP dividers via
 * i9xx_update_pll_dividers(). Gen2 encodes p1/p2 differently from i9xx.
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/* LVDS uses the one-hot p1 encoding. */
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/* I830 lacks the DVO 2x mode bit. */
	if (!IS_I830(dev_priv) &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
7572
/*
 * Program the transcoder timing registers (H/V total, blank, sync, and
 * vsync shift) from the crtc state's adjusted mode. Interlaced modes
 * need vtotal/vblank_end adjusted and a vsync shift computed, without
 * modifying the adjusted mode itself.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* keep the shift within one scanline */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* All timing registers hold (active - 1) | ((total - 1) << 16). */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
7634
/*
 * Program the pipe source size register from the crtc state.
 * The register holds (width - 1) << 16 | (height - 1).
 */
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((crtc_state->pipe_src_w - 1) << 16) |
		   (crtc_state->pipe_src_h - 1));
}
7648
7649 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7650                                    struct intel_crtc_state *pipe_config)
7651 {
7652         struct drm_device *dev = crtc->base.dev;
7653         struct drm_i915_private *dev_priv = to_i915(dev);
7654         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7655         u32 tmp;
7656
7657         tmp = I915_READ(HTOTAL(cpu_transcoder));
7658         pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7659         pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7660         tmp = I915_READ(HBLANK(cpu_transcoder));
7661         pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7662         pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7663         tmp = I915_READ(HSYNC(cpu_transcoder));
7664         pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7665         pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7666
7667         tmp = I915_READ(VTOTAL(cpu_transcoder));
7668         pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7669         pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7670         tmp = I915_READ(VBLANK(cpu_transcoder));
7671         pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7672         pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7673         tmp = I915_READ(VSYNC(cpu_transcoder));
7674         pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7675         pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7676
7677         if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7678                 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7679                 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7680                 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7681         }
7682 }
7683
7684 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7685                                     struct intel_crtc_state *pipe_config)
7686 {
7687         struct drm_device *dev = crtc->base.dev;
7688         struct drm_i915_private *dev_priv = to_i915(dev);
7689         u32 tmp;
7690
7691         tmp = I915_READ(PIPESRC(crtc->pipe));
7692         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7693         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7694
7695         pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7696         pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7697 }
7698
7699 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7700                                  struct intel_crtc_state *pipe_config)
7701 {
7702         mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7703         mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7704         mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7705         mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7706
7707         mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7708         mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7709         mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7710         mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7711
7712         mode->flags = pipe_config->base.adjusted_mode.flags;
7713         mode->type = DRM_MODE_TYPE_DRIVER;
7714
7715         mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7716
7717         mode->hsync = drm_mode_hsync(mode);
7718         mode->vrefresh = drm_mode_vrefresh(mode);
7719         drm_mode_set_name(mode);
7720 }
7721
/*
 * Assemble and program the PIPECONF register for a gen2-4/VLV/CHV pipe.
 *
 * Builds the value from the crtc state (double wide, bpc, dithering,
 * interlace mode, color range), writes it and posts the write.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/*
		 * Pre-gen4 hardware and SDVO outputs use the field
		 * indication interlace mode; everything else uses the
		 * sync-shift flavour.
		 */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	/* Limited (16-235) color range selection only exists on VLV/CHV here. */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(crtc->pipe));
}
7777
7778 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7779                                    struct intel_crtc_state *crtc_state)
7780 {
7781         struct drm_device *dev = crtc->base.dev;
7782         struct drm_i915_private *dev_priv = to_i915(dev);
7783         const struct intel_limit *limit;
7784         int refclk = 48000;
7785
7786         memset(&crtc_state->dpll_hw_state, 0,
7787                sizeof(crtc_state->dpll_hw_state));
7788
7789         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7790                 if (intel_panel_use_ssc(dev_priv)) {
7791                         refclk = dev_priv->vbt.lvds_ssc_freq;
7792                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7793                 }
7794
7795                 limit = &intel_limits_i8xx_lvds;
7796         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
7797                 limit = &intel_limits_i8xx_dvo;
7798         } else {
7799                 limit = &intel_limits_i8xx_dac;
7800         }
7801
7802         if (!crtc_state->clock_set &&
7803             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7804                                  refclk, NULL, &crtc_state->dpll)) {
7805                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7806                 return -EINVAL;
7807         }
7808
7809         i8xx_compute_dpll(crtc, crtc_state, NULL);
7810
7811         return 0;
7812 }
7813
/*
 * Compute DPLL settings for G4X CRTCs.
 *
 * Picks a PLL limit table per output type (dual- vs single-channel LVDS,
 * HDMI/analog, SDVO, or the i9xx SDVO table as fallback), optionally
 * switching to the VBT SSC reference clock for LVDS panels, then finds
 * dividers and fills in the DPLL hardware state.
 *
 * Returns 0 on success, -EINVAL if no divider combination fits the mode.
 */
static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		/* Dual-link LVDS needs a different divider range. */
		if (intel_is_dual_link_lvds(dev))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else {
		/* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}
7856
7857 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7858                                   struct intel_crtc_state *crtc_state)
7859 {
7860         struct drm_device *dev = crtc->base.dev;
7861         struct drm_i915_private *dev_priv = to_i915(dev);
7862         const struct intel_limit *limit;
7863         int refclk = 96000;
7864
7865         memset(&crtc_state->dpll_hw_state, 0,
7866                sizeof(crtc_state->dpll_hw_state));
7867
7868         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7869                 if (intel_panel_use_ssc(dev_priv)) {
7870                         refclk = dev_priv->vbt.lvds_ssc_freq;
7871                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7872                 }
7873
7874                 limit = &intel_limits_pineview_lvds;
7875         } else {
7876                 limit = &intel_limits_pineview_sdvo;
7877         }
7878
7879         if (!crtc_state->clock_set &&
7880             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7881                                 refclk, NULL, &crtc_state->dpll)) {
7882                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7883                 return -EINVAL;
7884         }
7885
7886         i9xx_compute_dpll(crtc, crtc_state, NULL);
7887
7888         return 0;
7889 }
7890
7891 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7892                                    struct intel_crtc_state *crtc_state)
7893 {
7894         struct drm_device *dev = crtc->base.dev;
7895         struct drm_i915_private *dev_priv = to_i915(dev);
7896         const struct intel_limit *limit;
7897         int refclk = 96000;
7898
7899         memset(&crtc_state->dpll_hw_state, 0,
7900                sizeof(crtc_state->dpll_hw_state));
7901
7902         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7903                 if (intel_panel_use_ssc(dev_priv)) {
7904                         refclk = dev_priv->vbt.lvds_ssc_freq;
7905                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7906                 }
7907
7908                 limit = &intel_limits_i9xx_lvds;
7909         } else {
7910                 limit = &intel_limits_i9xx_sdvo;
7911         }
7912
7913         if (!crtc_state->clock_set &&
7914             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7915                                  refclk, NULL, &crtc_state->dpll)) {
7916                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7917                 return -EINVAL;
7918         }
7919
7920         i9xx_compute_dpll(crtc, crtc_state, NULL);
7921
7922         return 0;
7923 }
7924
7925 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7926                                   struct intel_crtc_state *crtc_state)
7927 {
7928         int refclk = 100000;
7929         const struct intel_limit *limit = &intel_limits_chv;
7930
7931         memset(&crtc_state->dpll_hw_state, 0,
7932                sizeof(crtc_state->dpll_hw_state));
7933
7934         if (!crtc_state->clock_set &&
7935             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7936                                 refclk, NULL, &crtc_state->dpll)) {
7937                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7938                 return -EINVAL;
7939         }
7940
7941         chv_compute_dpll(crtc, crtc_state);
7942
7943         return 0;
7944 }
7945
7946 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7947                                   struct intel_crtc_state *crtc_state)
7948 {
7949         int refclk = 100000;
7950         const struct intel_limit *limit = &intel_limits_vlv;
7951
7952         memset(&crtc_state->dpll_hw_state, 0,
7953                sizeof(crtc_state->dpll_hw_state));
7954
7955         if (!crtc_state->clock_set &&
7956             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7957                                 refclk, NULL, &crtc_state->dpll)) {
7958                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7959                 return -EINVAL;
7960         }
7961
7962         vlv_compute_dpll(crtc, crtc_state);
7963
7964         return 0;
7965 }
7966
/*
 * Read out the GMCH panel fitter state for @crtc.
 *
 * Fills pipe_config->gmch_pfit with the raw control and programmed ratio
 * registers, but only when the fitter exists on this platform, is
 * enabled, and is attached to this pipe; otherwise leaves it untouched.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	/* On gen2/3 only mobile parts (and not 830) have a panel fitter. */
	if (INTEL_GEN(dev_priv) <= 3 &&
	    (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_GEN(dev_priv) < 4) {
		/* Pre-gen4 the fitter can only serve pipe B. */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		/* Gen4+ record the owning pipe in PFIT_CONTROL. */
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
}
7993
/*
 * Read back the port clock of a VLV pipe from its DPLL dividers.
 *
 * Decodes m1/m2/n/p1/p2 from the sideband (DPIO) PLL DW3 register and
 * converts them to a clock via vlv_calc_dpll_params().
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000; /* 100 MHz reference, in kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* DPIO sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	/* Unpack the divider fields from PLL DW3. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
8020
/*
 * Read out the primary plane configuration left behind by the
 * BIOS/firmware so the boot framebuffer can be inherited.
 *
 * Builds an intel_framebuffer describing what the plane is currently
 * scanning out (format, tiling, rotation, surface base, pitch, size) and
 * stores it in @plane_config. Returns silently (leaving @plane_config
 * without an fb) if the plane is disabled or the fb allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to inherit if the plane isn't enabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	/* The plane is expected to be attached to its own crtc's pipe. */
	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(DSPCNTR(i9xx_plane));

	/* Tiling and rotation bits only exist on gen4+. */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}

		if (val & DISPPLANE_ROTATE_180)
			plane_config->rotation = DRM_MODE_ROTATE_180;
	}

	/* CHV pipe B additionally supports horizontal mirroring. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
	    val & DISPPLANE_MIRROR)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/* The surface base/offset registers differ per generation. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(i9xx_plane));
		else
			offset = I915_READ(DSPLINOFF(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* Take the fb dimensions from the pipe source size. */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
8103
/*
 * Read back the port clock of a CHV pipe from its DPLL dividers.
 *
 * The dividers are spread over several sideband (DPIO) registers: m2 is
 * effectively fixed point with 22 fraction bits — the integer part comes
 * from PLL DW0 and the fraction from the low 22 bits of PLL DW2 when
 * fractional division is enabled.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000; /* 100 MHz reference, in kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* DPIO sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
8137
/*
 * Determine the pipe output color format (RGB / YCBCR444 / YCBCR420)
 * from the PIPEMISC register on BDW+ hardware.
 *
 * Also records whether a YCBCR444 stream is expected to be downsampled
 * to 4:2:0 by an LSPCON downstream of the pipe. On platforms where the
 * readout shows an unsupported combination, the format is marked
 * INTEL_OUTPUT_FORMAT_INVALID so the state checker can flag it.
 */
static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;

	pipe_config->lspcon_downsampling = false;

	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
		u32 tmp = I915_READ(PIPEMISC(crtc->pipe));

		if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
			bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
			bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;

			if (ycbcr420_enabled) {
				/* We support 4:2:0 in full blend mode only */
				if (!blend)
					output = INTEL_OUTPUT_FORMAT_INVALID;
				else if (!(IS_GEMINILAKE(dev_priv) ||
					   INTEL_GEN(dev_priv) >= 10))
					output = INTEL_OUTPUT_FORMAT_INVALID;
				else
					output = INTEL_OUTPUT_FORMAT_YCBCR420;
			} else {
				/*
				 * Currently there is no interface defined to
				 * check user preference between RGB/YCBCR444
				 * or YCBCR420. So the only possible case for
				 * YCBCR444 usage is driving YCBCR420 output
				 * with LSPCON, when pipe is configured for
				 * YCBCR444 output and LSPCON takes care of
				 * downsampling it.
				 */
				pipe_config->lspcon_downsampling = true;
				output = INTEL_OUTPUT_FORMAT_YCBCR444;
			}
		}
	}

	pipe_config->output_format = output;
}
8180
/*
 * Read out the full hardware state of a gen2-4/VLV/CHV pipe into
 * @pipe_config.
 *
 * Grabs the pipe power domain (bailing out if it isn't already enabled),
 * then reads PIPECONF, timings, source size, panel fitter, DPLL state
 * and the pixel multiplier, and derives the pipe clock.
 *
 * Returns true if the pipe is active and the state was read out,
 * false otherwise.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* Only touch the registers if the power well is already on. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only g4x/VLV/CHV encode the pipe bpc in PIPECONF. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev_priv))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Recover the port clock from the platform-specific DPLL state. */
	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
8297
8298 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
8299 {
8300         struct intel_encoder *encoder;
8301         int i;
8302         u32 val, final;
8303         bool has_lvds = false;
8304         bool has_cpu_edp = false;
8305         bool has_panel = false;
8306         bool has_ck505 = false;
8307         bool can_ssc = false;
8308         bool using_ssc_source = false;
8309
8310         /* We need to take the global config into account */
8311         for_each_intel_encoder(&dev_priv->drm, encoder) {
8312                 switch (encoder->type) {
8313                 case INTEL_OUTPUT_LVDS:
8314                         has_panel = true;
8315                         has_lvds = true;
8316                         break;
8317                 case INTEL_OUTPUT_EDP:
8318                         has_panel = true;
8319                         if (encoder->port == PORT_A)
8320                                 has_cpu_edp = true;
8321                         break;
8322                 default:
8323                         break;
8324                 }
8325         }
8326
8327         if (HAS_PCH_IBX(dev_priv)) {
8328                 has_ck505 = dev_priv->vbt.display_clock_mode;
8329                 can_ssc = has_ck505;
8330         } else {
8331                 has_ck505 = false;
8332                 can_ssc = true;
8333         }
8334
8335         /* Check if any DPLLs are using the SSC source */
8336         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8337                 u32 temp = I915_READ(PCH_DPLL(i));
8338
8339                 if (!(temp & DPLL_VCO_ENABLE))
8340                         continue;
8341
8342                 if ((temp & PLL_REF_INPUT_MASK) ==
8343                     PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8344                         using_ssc_source = true;
8345                         break;
8346                 }
8347         }
8348
8349         DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8350                       has_panel, has_lvds, has_ck505, using_ssc_source);
8351
8352         /* Ironlake: try to setup display ref clock before DPLL
8353          * enabling. This is only under driver's control after
8354          * PCH B stepping, previous chipset stepping should be
8355          * ignoring this setting.
8356          */
8357         val = I915_READ(PCH_DREF_CONTROL);
8358
8359         /* As we must carefully and slowly disable/enable each source in turn,
8360          * compute the final state we want first and check if we need to
8361          * make any changes at all.
8362          */
8363         final = val;
8364         final &= ~DREF_NONSPREAD_SOURCE_MASK;
8365         if (has_ck505)
8366                 final |= DREF_NONSPREAD_CK505_ENABLE;
8367         else
8368                 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8369
8370         final &= ~DREF_SSC_SOURCE_MASK;
8371         final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8372         final &= ~DREF_SSC1_ENABLE;
8373
8374         if (has_panel) {
8375                 final |= DREF_SSC_SOURCE_ENABLE;
8376
8377                 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8378                         final |= DREF_SSC1_ENABLE;
8379
8380                 if (has_cpu_edp) {
8381                         if (intel_panel_use_ssc(dev_priv) && can_ssc)
8382                                 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8383                         else
8384                                 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8385                 } else
8386                         final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8387         } else if (using_ssc_source) {
8388                 final |= DREF_SSC_SOURCE_ENABLE;
8389                 final |= DREF_SSC1_ENABLE;
8390         }
8391
8392         if (final == val)
8393                 return;
8394
8395         /* Always enable nonspread source */
8396         val &= ~DREF_NONSPREAD_SOURCE_MASK;
8397
8398         if (has_ck505)
8399                 val |= DREF_NONSPREAD_CK505_ENABLE;
8400         else
8401                 val |= DREF_NONSPREAD_SOURCE_ENABLE;
8402
8403         if (has_panel) {
8404                 val &= ~DREF_SSC_SOURCE_MASK;
8405                 val |= DREF_SSC_SOURCE_ENABLE;
8406
8407                 /* SSC must be turned on before enabling the CPU output  */
8408                 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8409                         DRM_DEBUG_KMS("Using SSC on panel\n");
8410                         val |= DREF_SSC1_ENABLE;
8411                 } else
8412                         val &= ~DREF_SSC1_ENABLE;
8413
8414                 /* Get SSC going before enabling the outputs */
8415                 I915_WRITE(PCH_DREF_CONTROL, val);
8416                 POSTING_READ(PCH_DREF_CONTROL);
8417                 udelay(200);
8418
8419                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8420
8421                 /* Enable CPU source on CPU attached eDP */
8422                 if (has_cpu_edp) {
8423                         if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8424                                 DRM_DEBUG_KMS("Using SSC on eDP\n");
8425                                 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8426                         } else
8427                                 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8428                 } else
8429                         val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8430
8431                 I915_WRITE(PCH_DREF_CONTROL, val);
8432                 POSTING_READ(PCH_DREF_CONTROL);
8433                 udelay(200);
8434         } else {
8435                 DRM_DEBUG_KMS("Disabling CPU source output\n");
8436
8437                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8438
8439                 /* Turn off CPU output */
8440                 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8441
8442                 I915_WRITE(PCH_DREF_CONTROL, val);
8443                 POSTING_READ(PCH_DREF_CONTROL);
8444                 udelay(200);
8445
8446                 if (!using_ssc_source) {
8447                         DRM_DEBUG_KMS("Disabling SSC source\n");
8448
8449                         /* Turn off the SSC source */
8450                         val &= ~DREF_SSC_SOURCE_MASK;
8451                         val |= DREF_SSC_SOURCE_DISABLE;
8452
8453                         /* Turn off SSC1 */
8454                         val &= ~DREF_SSC1_ENABLE;
8455
8456                         I915_WRITE(PCH_DREF_CONTROL, val);
8457                         POSTING_READ(PCH_DREF_CONTROL);
8458                         udelay(200);
8459                 }
8460         }
8461
8462         BUG_ON(val != final);
8463 }
8464
/*
 * Pulse the FDI mPHY reset via the SOUTH_CHICKEN2 sideband reset control:
 * assert the reset, wait for the status bit to latch, then de-assert and
 * wait for the status bit to clear.  Timeouts are logged but not fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	/* Assert the mPHY reset */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	/* Wait up to 100us for the hardware to acknowledge the assert */
	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	/* De-assert the reset... */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	/* ...and wait for the status bit to clear again */
	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
8485
/*
 * WaMPhyProgramming:hsw
 *
 * Program the FDI mPHY over the MPHY sideband interface with the magic
 * values mandated by the workaround.  Registers come in pairs (0x20xx /
 * 0x21xx, presumably one per FDI channel — the workaround doc is the
 * authority on their meaning); each is updated read-modify-write.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
8560
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 *
 * All SBI accesses are serialized under sb_lock.  Invalid parameter
 * combinations are WARNed about and corrected rather than rejected.
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* FDI always requires downspread; LPT-LP has no FDI at all */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable the SSC block but keep the path-alt (bypass) set */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Drop path-alt to route the spread clock */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* Finally enable the CLKOUT_DP buffer (register differs on LPT-LP) */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
8605
/* Sequence to disable CLKOUT_DP:
 * first gate the output buffer, then (if the SSC block is still running)
 * engage path-alt before setting the disable bit, with the mandated delay
 * in between.  All SBI accesses are serialized under sb_lock.
 */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Gate the CLKOUT_DP buffer (register differs on LPT-LP) */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Path-alt must be set before disabling the SSC block */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
8631
/* Map a clock bend in "steps" (-50..50, multiples of 5) to a table index */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SSCDIVINTPHASE values for each supported clock bend, indexed via
 * BEND_IDX().  Adjacent step pairs share a value; half-steps are
 * differentiated by the dither phase programmed in
 * lpt_bend_clkout_dp() instead.
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
8657
8658 /*
8659  * Bend CLKOUT_DP
8660  * steps -50 to 50 inclusive, in steps of 5
8661  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8662  * change in clock period = -(steps / 10) * 5.787 ps
8663  */
8664 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8665 {
8666         u32 tmp;
8667         int idx = BEND_IDX(steps);
8668
8669         if (WARN_ON(steps % 5 != 0))
8670                 return;
8671
8672         if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8673                 return;
8674
8675         mutex_lock(&dev_priv->sb_lock);
8676
8677         if (steps % 10 != 0)
8678                 tmp = 0xAAAAAAAB;
8679         else
8680                 tmp = 0x00000000;
8681         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8682
8683         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8684         tmp &= 0xffff0000;
8685         tmp |= sscdivintphase[idx];
8686         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8687
8688         mutex_unlock(&dev_priv->sb_lock);
8689 }
8690
8691 #undef BEND_IDX
8692
8693 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
8694 {
8695         struct intel_encoder *encoder;
8696         bool has_vga = false;
8697
8698         for_each_intel_encoder(&dev_priv->drm, encoder) {
8699                 switch (encoder->type) {
8700                 case INTEL_OUTPUT_ANALOG:
8701                         has_vga = true;
8702                         break;
8703                 default:
8704                         break;
8705                 }
8706         }
8707
8708         if (has_vga) {
8709                 lpt_bend_clkout_dp(dev_priv, 0);
8710                 lpt_enable_clkout_dp(dev_priv, true, true);
8711         } else {
8712                 lpt_disable_clkout_dp(dev_priv);
8713         }
8714 }
8715
8716 /*
8717  * Initialize reference clocks when the driver loads
8718  */
8719 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
8720 {
8721         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
8722                 ironlake_init_pch_refclk(dev_priv);
8723         else if (HAS_PCH_LPT(dev_priv))
8724                 lpt_init_pch_refclk(dev_priv);
8725 }
8726
/*
 * Program PIPECONF for ILK-style pipes from the committed crtc state:
 * bits per component, dithering, interlace mode and the limited
 * (16-235) color range selection.
 */
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	/* pipe_bpp is total bits per pixel; 18/24/30/36 = 6/8/10/12 bpc */
	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	if (crtc_state->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}
8768
8769 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
8770 {
8771         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8772         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8773         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8774         u32 val = 0;
8775
8776         if (IS_HASWELL(dev_priv) && crtc_state->dither)
8777                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8778
8779         if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8780                 val |= PIPECONF_INTERLACED_ILK;
8781         else
8782                 val |= PIPECONF_PROGRESSIVE;
8783
8784         I915_WRITE(PIPECONF(cpu_transcoder), val);
8785         POSTING_READ(PIPECONF(cpu_transcoder));
8786 }
8787
/*
 * Program PIPEMISC on BDW/gen9+: dither depth and enable, plus the
 * YCbCr output colorspace and 4:2:0 downsampling bits.  Earlier
 * platforms have no PIPEMISC register, hence the gen gate.
 */
static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);

	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
		u32 val = 0;

		/* pipe_bpp is total bits per pixel; 18/24/30/36 = 6/8/10/12 bpc */
		switch (crtc_state->pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (crtc_state->dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		/* Both YCbCr 4:4:4 and 4:2:0 need the YUV colorspace bit */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
			val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

		/* 4:2:0 additionally enables the downsampler */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			val |= PIPEMISC_YUV420_ENABLE |
				PIPEMISC_YUV420_MODE_FULL_BLEND;

		I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
	}
}
8828
8829 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8830 {
8831         /*
8832          * Account for spread spectrum to avoid
8833          * oversubscribing the link. Max center spread
8834          * is 2.5%; use 5% for safety's sake.
8835          */
8836         u32 bps = target_clock * bpp * 21 / 20;
8837         return DIV_ROUND_UP(bps, link_bw * 8);
8838 }
8839
/*
 * Whether the FP_CB_TUNE bit is needed for this DPLL configuration:
 * true when the effective M divider is small relative to N (m < factor * n).
 */
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
8844
/*
 * Assemble the ILK DPLL, FP0 and FP1 register values from the already
 * computed divider settings in crtc_state->dpll (and the optional
 * reduced_clock for FP1), storing them in crtc_state->dpll_hw_state.
 * Does not touch any hardware.
 */
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	/* FP1 holds the reduced (downclocked) dividers if provided */
	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Reference input: spread spectrum only for SSC-using LVDS panels */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
8946
/*
 * Compute the PCH DPLL settings for this crtc: pick the refclk and the
 * PLL limit table (based on LVDS single/dual-link and SSC frequency),
 * find matching dividers, and reserve a shared DPLL.
 *
 * Returns 0 on success, -EINVAL if no divider settings or no free
 * shared DPLL can be found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 120000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		/* Limit tables differ for dual vs single link and 100MHz SSC */
		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	/* Skip the search if userspace supplied divider values directly */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state, NULL);

	if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
		DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
			      pipe_name(crtc->pipe));
		return -EINVAL;
	}

	return 0;
}
9001
/*
 * Read back the link M/N values from the PCH transcoder registers for
 * state readout.  The TU size shares the DATA_M1 register with gmch_m,
 * so it is masked out of gmch_m and extracted separately (stored +1 in
 * hardware).
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
9017
/*
 * Read back the link M/N values from the CPU transcoder (gen5+) or the
 * per-pipe G4X registers (gen4 and earlier) for state readout.  When
 * m2_n2 is non-NULL and the transcoder has an M2/N2 set, that set is
 * read out as well.  The TU size shares the DATA_M register with
 * gmch_m and is extracted separately (stored +1 in hardware).
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		/* Pre-gen5: M/N registers are per-pipe, not per-transcoder */
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
9054
9055 void intel_dp_get_m_n(struct intel_crtc *crtc,
9056                       struct intel_crtc_state *pipe_config)
9057 {
9058         if (pipe_config->has_pch_encoder)
9059                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9060         else
9061                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9062                                              &pipe_config->dp_m_n,
9063                                              &pipe_config->dp_m2_n2);
9064 }
9065
/*
 * Read back the FDI link M/N values from the CPU transcoder for state
 * readout; FDI has no M2/N2 set, hence the NULL.
 */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
9072
/*
 * Read out the panel fitter (pipe scaler) state on SKL+: scan this
 * pipe's scalers for one that is enabled and bound to the pipe (not to
 * a plane), record its position/size, and update the scaler bookkeeping
 * accordingly.
 */
static void skylake_get_pfit_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
	u32 ps_ctrl = 0;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
		/* Pipe scaling: enabled with no plane selected */
		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
			id = i;
			pipe_config->pch_pfit.enabled = true;
			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
			scaler_state->scalers[i].in_use = true;
			break;
		}
	}

	/* id stays -1 (and the crtc user bit is cleared) if none found */
	scaler_state->scaler_id = id;
	if (id >= 0) {
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	} else {
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}
}
9103
9104 static void
9105 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9106                                  struct intel_initial_plane_config *plane_config)
9107 {
9108         struct drm_device *dev = crtc->base.dev;
9109         struct drm_i915_private *dev_priv = to_i915(dev);
9110         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9111         enum plane_id plane_id = plane->id;
9112         enum pipe pipe;
9113         u32 val, base, offset, stride_mult, tiling, alpha;
9114         int fourcc, pixel_format;
9115         unsigned int aligned_height;
9116         struct drm_framebuffer *fb;
9117         struct intel_framebuffer *intel_fb;
9118
9119         if (!plane->get_hw_state(plane, &pipe))
9120                 return;
9121
9122         WARN_ON(pipe != crtc->pipe);
9123
9124         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9125         if (!intel_fb) {
9126                 DRM_DEBUG_KMS("failed to alloc fb\n");
9127                 return;
9128         }
9129
9130         fb = &intel_fb->base;
9131
9132         fb->dev = dev;
9133
9134         val = I915_READ(PLANE_CTL(pipe, plane_id));
9135
9136         if (INTEL_GEN(dev_priv) >= 11)
9137                 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
9138         else
9139                 pixel_format = val & PLANE_CTL_FORMAT_MASK;
9140
9141         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
9142                 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
9143                 alpha &= PLANE_COLOR_ALPHA_MASK;
9144         } else {
9145                 alpha = val & PLANE_CTL_ALPHA_MASK;
9146         }
9147
9148         fourcc = skl_format_to_fourcc(pixel_format,
9149                                       val & PLANE_CTL_ORDER_RGBX, alpha);
9150         fb->format = drm_format_info(fourcc);
9151
9152         tiling = val & PLANE_CTL_TILED_MASK;
9153         switch (tiling) {
9154         case PLANE_CTL_TILED_LINEAR:
9155                 fb->modifier = DRM_FORMAT_MOD_LINEAR;
9156                 break;
9157         case PLANE_CTL_TILED_X:
9158                 plane_config->tiling = I915_TILING_X;
9159                 fb->modifier = I915_FORMAT_MOD_X_TILED;
9160                 break;
9161         case PLANE_CTL_TILED_Y:
9162                 plane_config->tiling = I915_TILING_Y;
9163                 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9164                         fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
9165                 else
9166                         fb->modifier = I915_FORMAT_MOD_Y_TILED;
9167                 break;
9168         case PLANE_CTL_TILED_YF:
9169                 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9170                         fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
9171                 else
9172                         fb->modifier = I915_FORMAT_MOD_Yf_TILED;
9173                 break;
9174         default:
9175                 MISSING_CASE(tiling);
9176                 goto error;
9177         }
9178
9179         /*
9180          * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
9181          * while i915 HW rotation is clockwise, thats why this swapping.
9182          */
9183         switch (val & PLANE_CTL_ROTATE_MASK) {
9184         case PLANE_CTL_ROTATE_0:
9185                 plane_config->rotation = DRM_MODE_ROTATE_0;
9186                 break;
9187         case PLANE_CTL_ROTATE_90:
9188                 plane_config->rotation = DRM_MODE_ROTATE_270;
9189                 break;
9190         case PLANE_CTL_ROTATE_180:
9191                 plane_config->rotation = DRM_MODE_ROTATE_180;
9192                 break;
9193         case PLANE_CTL_ROTATE_270:
9194                 plane_config->rotation = DRM_MODE_ROTATE_90;
9195                 break;
9196         }
9197
9198         if (INTEL_GEN(dev_priv) >= 10 &&
9199             val & PLANE_CTL_FLIP_HORIZONTAL)
9200                 plane_config->rotation |= DRM_MODE_REFLECT_X;
9201
9202         base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
9203         plane_config->base = base;
9204
9205         offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
9206
9207         val = I915_READ(PLANE_SIZE(pipe, plane_id));
9208         fb->height = ((val >> 16) & 0xfff) + 1;
9209         fb->width = ((val >> 0) & 0x1fff) + 1;
9210
9211         val = I915_READ(PLANE_STRIDE(pipe, plane_id));
9212         stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
9213         fb->pitches[0] = (val & 0x3ff) * stride_mult;
9214
9215         aligned_height = intel_fb_align_height(fb, 0, fb->height);
9216
9217         plane_config->size = fb->pitches[0] * aligned_height;
9218
9219         DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9220                       crtc->base.name, plane->base.name, fb->width, fb->height,
9221                       fb->format->cpp[0] * 8, base, fb->pitches[0],
9222                       plane_config->size);
9223
9224         plane_config->fb = intel_fb;
9225         return;
9226
9227 error:
9228         kfree(intel_fb);
9229 }
9230
/*
 * Read out the PCH panel fitter (PF) hardware state for @crtc into
 * @pipe_config (enabled flag, window position and size registers).
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 tmp;

        tmp = I915_READ(PF_CTL(crtc->pipe));

        if (tmp & PF_ENABLE) {
                pipe_config->pch_pfit.enabled = true;
                pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
                pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

                /* We currently do not free assignments of panel fitters on
                 * ivb/hsw (since we don't use the higher upscaling modes which
                 * differentiates them) so just WARN about this case for now. */
                if (IS_GEN(dev_priv, 7)) {
                        /* On gen7 the PF carries a pipe-select field; it must
                         * match the pipe we read it from. */
                        WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
                                PF_PIPE_SEL_IVB(crtc->pipe));
                }
        }
}
9254
9255 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9256                                      struct intel_crtc_state *pipe_config)
9257 {
9258         struct drm_device *dev = crtc->base.dev;
9259         struct drm_i915_private *dev_priv = to_i915(dev);
9260         enum intel_display_power_domain power_domain;
9261         intel_wakeref_t wakeref;
9262         u32 tmp;
9263         bool ret;
9264
9265         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9266         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
9267         if (!wakeref)
9268                 return false;
9269
9270         pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
9271         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9272         pipe_config->shared_dpll = NULL;
9273
9274         ret = false;
9275         tmp = I915_READ(PIPECONF(crtc->pipe));
9276         if (!(tmp & PIPECONF_ENABLE))
9277                 goto out;
9278
9279         switch (tmp & PIPECONF_BPC_MASK) {
9280         case PIPECONF_6BPC:
9281                 pipe_config->pipe_bpp = 18;
9282                 break;
9283         case PIPECONF_8BPC:
9284                 pipe_config->pipe_bpp = 24;
9285                 break;
9286         case PIPECONF_10BPC:
9287                 pipe_config->pipe_bpp = 30;
9288                 break;
9289         case PIPECONF_12BPC:
9290                 pipe_config->pipe_bpp = 36;
9291                 break;
9292         default:
9293                 break;
9294         }
9295
9296         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9297                 pipe_config->limited_color_range = true;
9298
9299         if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9300                 struct intel_shared_dpll *pll;
9301                 enum intel_dpll_id pll_id;
9302
9303                 pipe_config->has_pch_encoder = true;
9304
9305                 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9306                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9307                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
9308
9309                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9310
9311                 if (HAS_PCH_IBX(dev_priv)) {
9312                         /*
9313                          * The pipe->pch transcoder and pch transcoder->pll
9314                          * mapping is fixed.
9315                          */
9316                         pll_id = (enum intel_dpll_id) crtc->pipe;
9317                 } else {
9318                         tmp = I915_READ(PCH_DPLL_SEL);
9319                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9320                                 pll_id = DPLL_ID_PCH_PLL_B;
9321                         else
9322                                 pll_id= DPLL_ID_PCH_PLL_A;
9323                 }
9324
9325                 pipe_config->shared_dpll =
9326                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
9327                 pll = pipe_config->shared_dpll;
9328
9329                 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
9330                                                 &pipe_config->dpll_hw_state));
9331
9332                 tmp = pipe_config->dpll_hw_state.dpll;
9333                 pipe_config->pixel_multiplier =
9334                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9335                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9336
9337                 ironlake_pch_clock_get(crtc, pipe_config);
9338         } else {
9339                 pipe_config->pixel_multiplier = 1;
9340         }
9341
9342         intel_get_pipe_timings(crtc, pipe_config);
9343         intel_get_pipe_src_size(crtc, pipe_config);
9344
9345         ironlake_get_pfit_config(crtc, pipe_config);
9346
9347         ret = true;
9348
9349 out:
9350         intel_display_power_put(dev_priv, power_domain, wakeref);
9351
9352         return ret;
9353 }
9354
/*
 * Sanity-check that the display engine is fully quiesced (no active CRTCs,
 * power well, PLLs, PWMs, panel power, utility pin, GTC, or IRQs) before
 * LCPLL is disabled. Each violation is reported via I915_STATE_WARN.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;

        for_each_intel_crtc(dev, crtc)
                I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
                     pipe_name(crtc->pipe));

        I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
                        "Display power well on\n");
        I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
        I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
        I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
        I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
        I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
             "CPU PWM1 enabled\n");
        /* Only HSW has the second CPU backlight PWM. */
        if (IS_HASWELL(dev_priv))
                I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
                     "CPU PWM2 enabled\n");
        I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
             "PCH PWM1 enabled\n");
        I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
             "Utility pin enabled\n");
        I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

        /*
         * In theory we can still leave IRQs enabled, as long as only the HPD
         * interrupts remain enabled. We used to check for that, but since it's
         * gen-specific and since we only disable LCPLL after we fully disable
         * the interrupts, the check below should be enough.
         */
        I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
9389
9390 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
9391 {
9392         if (IS_HASWELL(dev_priv))
9393                 return I915_READ(D_COMP_HSW);
9394         else
9395                 return I915_READ(D_COMP_BDW);
9396 }
9397
/*
 * Write D_COMP. On HSW the value is sent through the pcode mailbox
 * (GEN6_PCODE_WRITE_D_COMP) under pcu_lock — presumably because a direct
 * MMIO write is not honored there (TODO: confirm against BSpec). On BDW
 * it is a plain MMIO write with a posting read.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
        if (IS_HASWELL(dev_priv)) {
                mutex_lock(&dev_priv->pcu_lock);
                if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
                                            val))
                        DRM_DEBUG_KMS("Failed to write to D_COMP\n");
                mutex_unlock(&dev_priv->pcu_lock);
        } else {
                I915_WRITE(D_COMP_BDW, val);
                POSTING_READ(D_COMP_BDW);
        }
}
9411
9412 /*
9413  * This function implements pieces of two sequences from BSpec:
9414  * - Sequence for display software to disable LCPLL
9415  * - Sequence for display software to allow package C8+
9416  * The steps implemented here are just the steps that actually touch the LCPLL
9417  * register. Callers should take care of disabling all the display engine
9418  * functions, doing the mode unset, fixing interrupts, etc.
9419  */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
                              bool switch_to_fclk, bool allow_power_down)
{
        u32 val;

        assert_can_disable_lcpll(dev_priv);

        val = I915_READ(LCPLL_CTL);

        if (switch_to_fclk) {
                /* Move the CD clock onto FCLK before disabling the PLL. */
                val |= LCPLL_CD_SOURCE_FCLK;
                I915_WRITE(LCPLL_CTL, val);

                if (wait_for_us(I915_READ(LCPLL_CTL) &
                                LCPLL_CD_SOURCE_FCLK_DONE, 1))
                        DRM_ERROR("Switching to FCLK failed\n");

                /* Re-read: hardware may have updated other bits meanwhile. */
                val = I915_READ(LCPLL_CTL);
        }

        val |= LCPLL_PLL_DISABLE;
        I915_WRITE(LCPLL_CTL, val);
        POSTING_READ(LCPLL_CTL);

        /* Wait for the PLL to report unlocked (LOCK bit clears). */
        if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
                DRM_ERROR("LCPLL still locked\n");

        /* Disable D_COMP compensation and wait for RCOMP to finish. */
        val = hsw_read_dcomp(dev_priv);
        val |= D_COMP_COMP_DISABLE;
        hsw_write_dcomp(dev_priv, val);
        ndelay(100);

        if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
                     1))
                DRM_ERROR("D_COMP RCOMP still in progress\n");

        if (allow_power_down) {
                /* Let the PLL power down for deeper package C-states. */
                val = I915_READ(LCPLL_CTL);
                val |= LCPLL_POWER_DOWN_ALLOW;
                I915_WRITE(LCPLL_CTL, val);
                POSTING_READ(LCPLL_CTL);
        }
}
9463
9464 /*
9465  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9466  * source.
9467  */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
        u32 val;

        val = I915_READ(LCPLL_CTL);

        /* Already fully up: locked, enabled, on LCPLL, power-down disallowed. */
        if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
                    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
                return;

        /*
         * Make sure we're not on PC8 state before disabling PC8, otherwise
         * we'll hang the machine. To prevent PC8 state, just enable force_wake.
         */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

        if (val & LCPLL_POWER_DOWN_ALLOW) {
                val &= ~LCPLL_POWER_DOWN_ALLOW;
                I915_WRITE(LCPLL_CTL, val);
                POSTING_READ(LCPLL_CTL);
        }

        /* Re-enable D_COMP compensation (force on, disable cleared). */
        val = hsw_read_dcomp(dev_priv);
        val |= D_COMP_COMP_FORCE;
        val &= ~D_COMP_COMP_DISABLE;
        hsw_write_dcomp(dev_priv, val);

        val = I915_READ(LCPLL_CTL);
        val &= ~LCPLL_PLL_DISABLE;
        I915_WRITE(LCPLL_CTL, val);

        if (intel_wait_for_register(dev_priv,
                                    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
                                    5))
                DRM_ERROR("LCPLL not locked yet\n");

        if (val & LCPLL_CD_SOURCE_FCLK) {
                /* Switch the CD clock back from FCLK to LCPLL. */
                val = I915_READ(LCPLL_CTL);
                val &= ~LCPLL_CD_SOURCE_FCLK;
                I915_WRITE(LCPLL_CTL, val);

                if (wait_for_us((I915_READ(LCPLL_CTL) &
                                 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
                        DRM_ERROR("Switching back to LCPLL failed\n");
        }

        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

        /* CDCLK may have changed while we were on FCLK; re-read it. */
        intel_update_cdclk(dev_priv);
        intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
}
9519
9520 /*
9521  * Package states C8 and deeper are really deep PC states that can only be
9522  * reached when all the devices on the system allow it, so even if the graphics
9523  * device allows PC8+, it doesn't mean the system will actually get to these
9524  * states. Our driver only allows PC8+ when going into runtime PM.
9525  *
9526  * The requirements for PC8+ are that all the outputs are disabled, the power
9527  * well is disabled and most interrupts are disabled, and these are also
9528  * requirements for runtime PM. When these conditions are met, we manually do
9529  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
9531  * hang the machine.
9532  *
9533  * When we really reach PC8 or deeper states (not just when we allow it) we lose
9534  * the state of some registers, so when we come back from PC8+ we need to
9535  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9536  * need to take care of the registers kept by RC6. Notice that this happens even
9537  * if we don't put the device in PCI D3 state (which is what currently happens
9538  * because of the runtime PM support).
9539  *
9540  * For more, read "Display Sequences for Package C8" on the hardware
9541  * documentation.
9542  */
9543 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9544 {
9545         u32 val;
9546
9547         DRM_DEBUG_KMS("Enabling package C8+\n");
9548
9549         if (HAS_PCH_LPT_LP(dev_priv)) {
9550                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9551                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9552                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9553         }
9554
9555         lpt_disable_clkout_dp(dev_priv);
9556         hsw_disable_lcpll(dev_priv, true, true);
9557 }
9558
9559 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9560 {
9561         u32 val;
9562
9563         DRM_DEBUG_KMS("Disabling package C8+\n");
9564
9565         hsw_restore_lcpll(dev_priv);
9566         lpt_init_pch_refclk(dev_priv);
9567
9568         if (HAS_PCH_LPT_LP(dev_priv)) {
9569                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9570                 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9571                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9572         }
9573 }
9574
9575 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9576                                       struct intel_crtc_state *crtc_state)
9577 {
9578         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9579         struct intel_atomic_state *state =
9580                 to_intel_atomic_state(crtc_state->base.state);
9581
9582         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
9583             IS_ICELAKE(dev_priv)) {
9584                 struct intel_encoder *encoder =
9585                         intel_get_crtc_new_encoder(state, crtc_state);
9586
9587                 if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
9588                         DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9589                                       pipe_name(crtc->pipe));
9590                         return -EINVAL;
9591                 }
9592         }
9593
9594         return 0;
9595 }
9596
/*
 * Read out which shared DPLL drives @port on CNL from DPCLKA_CFGCR0 and
 * record it in @pipe_config->shared_dpll.
 */
static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
                                   enum port port,
                                   struct intel_crtc_state *pipe_config)
{
        enum intel_dpll_id id;
        u32 temp;

        temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
        id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

        /* CNL only has DPLL0..2; anything else is a readout bug. */
        if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
                return;

        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9612
/*
 * Read out which shared DPLL drives @port on ICL. Combo PHY ports are
 * decoded from DPCLKA_CFGCR0_ICL; Type-C ports use a fixed TC->PLL
 * mapping. TBT PLLs are not handled yet.
 */
static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
                                enum port port,
                                struct intel_crtc_state *pipe_config)
{
        enum intel_dpll_id id;
        u32 temp;

        /* TODO: TBT pll not implemented. */
        if (intel_port_is_combophy(dev_priv, port)) {
                temp = I915_READ(DPCLKA_CFGCR0_ICL) &
                       DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
                id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

                if (WARN_ON(!intel_dpll_is_combophy(id)))
                        return;
        } else if (intel_port_is_tc(dev_priv, port)) {
                id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
        } else {
                WARN(1, "Invalid port %x\n", port);
                return;
        }

        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9637
9638 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9639                                 enum port port,
9640                                 struct intel_crtc_state *pipe_config)
9641 {
9642         enum intel_dpll_id id;
9643
9644         switch (port) {
9645         case PORT_A:
9646                 id = DPLL_ID_SKL_DPLL0;
9647                 break;
9648         case PORT_B:
9649                 id = DPLL_ID_SKL_DPLL1;
9650                 break;
9651         case PORT_C:
9652                 id = DPLL_ID_SKL_DPLL2;
9653                 break;
9654         default:
9655                 DRM_ERROR("Incorrect port type\n");
9656                 return;
9657         }
9658
9659         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9660 }
9661
/*
 * Read out which shared DPLL drives @port on SKL from DPLL_CTRL2 and
 * record it in @pipe_config->shared_dpll.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
                                enum port port,
                                struct intel_crtc_state *pipe_config)
{
        enum intel_dpll_id id;
        u32 temp;

        temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
        /* Per-port field shift; presumably mirrors the
         * DPLL_CTRL2_DDI_CLK_SEL layout — TODO confirm against the
         * register definition. */
        id = temp >> (port * 3 + 1);

        /* SKL has DPLL0..3; anything else is a readout bug. */
        if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
                return;

        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9677
/*
 * Read out which clock source feeds @port on HSW/BDW from PORT_CLK_SEL
 * and record the matching shared DPLL in @pipe_config. Ports with no
 * clock selected (or an unknown selection) leave shared_dpll untouched.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
                                enum port port,
                                struct intel_crtc_state *pipe_config)
{
        enum intel_dpll_id id;
        u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

        switch (ddi_pll_sel) {
        case PORT_CLK_SEL_WRPLL1:
                id = DPLL_ID_WRPLL1;
                break;
        case PORT_CLK_SEL_WRPLL2:
                id = DPLL_ID_WRPLL2;
                break;
        case PORT_CLK_SEL_SPLL:
                id = DPLL_ID_SPLL;
                break;
        case PORT_CLK_SEL_LCPLL_810:
                id = DPLL_ID_LCPLL_810;
                break;
        case PORT_CLK_SEL_LCPLL_1350:
                id = DPLL_ID_LCPLL_1350;
                break;
        case PORT_CLK_SEL_LCPLL_2700:
                id = DPLL_ID_LCPLL_2700;
                break;
        default:
                MISSING_CASE(ddi_pll_sel);
                /* fall through */
        case PORT_CLK_SEL_NONE:
                return;
        }

        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9713
/*
 * Determine which CPU transcoder (pipe-mapped, eDP, or on ICL a DSI
 * transcoder) is driving @crtc and whether that transcoder's pipe is
 * enabled. On success a power domain reference for the transcoder is
 * taken and its bit is recorded in @power_domain_mask for the caller to
 * release later.
 *
 * Returns true if the transcoder's PIPECONF reports the pipe enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config,
                                     u64 *power_domain_mask)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
        unsigned long enabled_panel_transcoders = 0;
        enum transcoder panel_transcoder;
        u32 tmp;

        /* ICL adds dedicated DSI transcoders to the panel set. */
        if (IS_ICELAKE(dev_priv))
                panel_transcoder_mask |=
                        BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

        /*
         * The pipe->transcoder mapping is fixed with the exception of the eDP
         * and DSI transcoders handled below.
         */
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

        /*
         * XXX: Do intel_display_power_get_if_enabled before reading this (for
         * consistency and less surprising code; it's in always on power).
         */
        for_each_set_bit(panel_transcoder,
                         &panel_transcoder_mask,
                         ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
                enum pipe trans_pipe;

                tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
                if (!(tmp & TRANS_DDI_FUNC_ENABLE))
                        continue;

                /*
                 * Log all enabled ones, only use the first one.
                 *
                 * FIXME: This won't work for two separate DSI displays.
                 */
                enabled_panel_transcoders |= BIT(panel_transcoder);
                if (enabled_panel_transcoders != BIT(panel_transcoder))
                        continue;

                /* Decode which pipe this panel transcoder is attached to. */
                switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
                default:
                        WARN(1, "unknown pipe linked to transcoder %s\n",
                             transcoder_name(panel_transcoder));
                        /* fall through */
                case TRANS_DDI_EDP_INPUT_A_ONOFF:
                case TRANS_DDI_EDP_INPUT_A_ON:
                        trans_pipe = PIPE_A;
                        break;
                case TRANS_DDI_EDP_INPUT_B_ONOFF:
                        trans_pipe = PIPE_B;
                        break;
                case TRANS_DDI_EDP_INPUT_C_ONOFF:
                        trans_pipe = PIPE_C;
                        break;
                }

                if (trans_pipe == crtc->pipe)
                        pipe_config->cpu_transcoder = panel_transcoder;
        }

        /*
         * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
         */
        WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
                enabled_panel_transcoders != BIT(TRANSCODER_EDP));

        power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;

        /* Record the reference so the caller can drop it later. */
        WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
        *power_domain_mask |= BIT_ULL(power_domain);

        tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

        return tmp & PIPECONF_ENABLE;
}
9796
/*
 * Check whether a BXT DSI transcoder (port A or C) is driving @crtc. Any
 * power domain references taken while probing are recorded in
 * @power_domain_mask for the caller to release.
 *
 * Returns true if pipe_config->cpu_transcoder was set to a DSI transcoder.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                                         struct intel_crtc_state *pipe_config,
                                         u64 *power_domain_mask)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        enum port port;
        enum transcoder cpu_transcoder;
        u32 tmp;

        for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
                if (port == PORT_A)
                        cpu_transcoder = TRANSCODER_DSI_A;
                else
                        cpu_transcoder = TRANSCODER_DSI_C;

                power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
                if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                        continue;

                WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
                *power_domain_mask |= BIT_ULL(power_domain);

                /*
                 * The PLL needs to be enabled with a valid divider
                 * configuration, otherwise accessing DSI registers will hang
                 * the machine. See BSpec North Display Engine
                 * registers/MIPI[BXT]. We can break out here early, since we
                 * need the same DSI PLL to be enabled for both DSI ports.
                 */
                if (!bxt_dsi_pll_is_enabled(dev_priv))
                        break;

                /* XXX: this works for video mode only */
                tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
                if (!(tmp & DPI_ENABLE))
                        continue;

                /* The port must be routed to this crtc's pipe. */
                tmp = I915_READ(MIPI_CTRL(port));
                if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
                        continue;

                pipe_config->cpu_transcoder = cpu_transcoder;
                break;
        }

        return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
9846
/*
 * Read out the DDI port driving this transcoder, resolve the shared DPLL
 * feeding it (per-platform decode), and read out FDI/PCH encoder state
 * for the HSW DDI E case.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
                                       struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_shared_dpll *pll;
        enum port port;
        u32 tmp;

        tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

        port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

        /* Per-platform DPLL readout. */
        if (IS_ICELAKE(dev_priv))
                icelake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_CANNONLAKE(dev_priv))
                cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_GEN9_BC(dev_priv))
                skylake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_GEN9_LP(dev_priv))
                bxt_get_ddi_pll(dev_priv, port, pipe_config);
        else
                haswell_get_ddi_pll(dev_priv, port, pipe_config);

        pll = pipe_config->shared_dpll;
        if (pll) {
                WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
                                                &pipe_config->dpll_hw_state));
        }

        /*
         * Haswell has only one FDI/PCH transcoder A, which is connected to
         * DDI E. So just check whether this pipe is wired to DDI E and
         * whether the PCH transcoder is on.
         */
        if (INTEL_GEN(dev_priv) < 9 &&
            (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
                pipe_config->has_pch_encoder = true;

                tmp = I915_READ(FDI_RX_CTL(PIPE_A));
                pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
                                          FDI_DP_PORT_WIDTH_SHIFT) + 1;

                ironlake_get_fdi_m_n_config(crtc, pipe_config);
        }
}
9892
/*
 * Read out the full hardware state of an HSW+ pipe into @pipe_config:
 * transcoder selection, DDI/DPLL state, timings, source size, YCbCr
 * config, gamma mode, panel fitter/scalers, IPS, pixel multiplier.
 *
 * Power domain references are collected in a local mask while probing and
 * all dropped before returning. Returns true if the pipe is active.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
                                    struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
        u64 power_domain_mask;
        bool active;

        intel_crtc_init_scalers(crtc, pipe_config);

        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
        power_domain_mask = BIT_ULL(power_domain);

        pipe_config->shared_dpll = NULL;

        active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);

        /* On BXT/GLK, DSI transcoders are probed separately. */
        if (IS_GEN9_LP(dev_priv) &&
            bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
                WARN_ON(active);
                active = true;
        }

        if (!active)
                goto out;

        if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
            IS_ICELAKE(dev_priv)) {
                haswell_get_ddi_port_state(crtc, pipe_config);
                intel_get_pipe_timings(crtc, pipe_config);
        }

        intel_get_pipe_src_size(crtc, pipe_config);
        intel_get_crtc_ycbcr_config(crtc, pipe_config);

        pipe_config->gamma_mode =
                I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;

        /* The panel fitter lives in its own power domain. */
        power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
        if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
                WARN_ON(power_domain_mask & BIT_ULL(power_domain));
                power_domain_mask |= BIT_ULL(power_domain);

                if (INTEL_GEN(dev_priv) >= 9)
                        skylake_get_pfit_config(crtc, pipe_config);
                else
                        ironlake_get_pfit_config(crtc, pipe_config);
        }

        if (hsw_crtc_supports_ips(crtc)) {
                if (IS_HASWELL(dev_priv))
                        pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
                else {
                        /*
                         * We cannot readout IPS state on broadwell, set to
                         * true so we can set it to a defined state on first
                         * commit.
                         */
                        pipe_config->ips_enabled = true;
                }
        }

        if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
                pipe_config->pixel_multiplier =
                        I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
        } else {
                pipe_config->pixel_multiplier = 1;
        }

out:
        /* Drop every power domain reference collected above. */
        for_each_power_domain(power_domain, power_domain_mask)
                intel_display_power_put_unchecked(dev_priv, power_domain);

        return active;
}
9971
9972 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
9973 {
9974         struct drm_i915_private *dev_priv =
9975                 to_i915(plane_state->base.plane->dev);
9976         const struct drm_framebuffer *fb = plane_state->base.fb;
9977         const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
9978         u32 base;
9979
9980         if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
9981                 base = obj->phys_handle->busaddr;
9982         else
9983                 base = intel_plane_ggtt_offset(plane_state);
9984
9985         base += plane_state->color_plane[0].offset;
9986
9987         /* ILK+ do this automagically */
9988         if (HAS_GMCH(dev_priv) &&
9989             plane_state->base.rotation & DRM_MODE_ROTATE_180)
9990                 base += (plane_state->base.crtc_h *
9991                          plane_state->base.crtc_w - 1) * fb->format->cpp[0];
9992
9993         return base;
9994 }
9995
9996 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
9997 {
9998         int x = plane_state->base.crtc_x;
9999         int y = plane_state->base.crtc_y;
10000         u32 pos = 0;
10001
10002         if (x < 0) {
10003                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10004                 x = -x;
10005         }
10006         pos |= x << CURSOR_X_SHIFT;
10007
10008         if (y < 0) {
10009                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10010                 y = -y;
10011         }
10012         pos |= y << CURSOR_Y_SHIFT;
10013
10014         return pos;
10015 }
10016
10017 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10018 {
10019         const struct drm_mode_config *config =
10020                 &plane_state->base.plane->dev->mode_config;
10021         int width = plane_state->base.crtc_w;
10022         int height = plane_state->base.crtc_h;
10023
10024         return width > 0 && width <= config->cursor_width &&
10025                 height > 0 && height <= config->cursor_height;
10026 }
10027
/*
 * Compute and validate the surface layout (GGTT view, stride, aligned
 * offset) for a cursor plane. Cursors can't be panned within the fb,
 * so any residual src_x/src_y left over after offset alignment is
 * rejected. Returns 0 on success or a negative error code.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int src_x, src_y;
	u32 offset;
	int ret;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);

	ret = intel_plane_check_stride(plane_state);
	if (ret)
		return ret;

	/* src coordinates are in 16.16 fixed point */
	src_x = plane_state->base.src_x >> 16;
	src_y = plane_state->base.src_y >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	/* The cursor has no x/y offset registers in hardware. */
	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	plane_state->color_plane[0].offset = offset;

	return 0;
}
10059
/*
 * Common atomic check for all cursor planes: reject tiled framebuffers,
 * clip the plane against the crtc (scaling is not allowed), then
 * validate the source coordinates and the surface layout. Returns 0 on
 * success or a negative error code.
 */
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
			      struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
						  &crtc_state->base,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Nothing more to validate if the cursor ends up invisible. */
	if (!plane_state->base.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	ret = intel_cursor_check_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
10092
/* Maximum cursor stride in bytes on 845g/865g. */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return 2048;
}
10100
/* CRTC-derived bits of the 845g/865g cursor control register. */
static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	return CURSOR_GAMMA_ENABLE;
}
10105
10106 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10107                            const struct intel_plane_state *plane_state)
10108 {
10109         return CURSOR_ENABLE |
10110                 CURSOR_FORMAT_ARGB |
10111                 CURSOR_STRIDE(plane_state->color_plane[0].stride);
10112 }
10113
10114 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10115 {
10116         int width = plane_state->base.crtc_w;
10117
10118         /*
10119          * 845g/865g are only limited by the width of their cursors,
10120          * the height is arbitrary up to the precision of the register.
10121          */
10122         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10123 }
10124
/*
 * 845g/865g cursor ->atomic_check(): on top of the common cursor
 * checks, validate the dimensions and the stride (must be one of the
 * four power-of-two values the hardware supports), then precompute the
 * control register value. Returns 0 on success or a negative errno.
 */
static int i845_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i845_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	/* intel_cursor_check_surface() should have set the stride already */
	WARN_ON(plane_state->base.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	switch (fb->pitches[0]) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
			      fb->pitches[0]);
		return -EINVAL;
	}

	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);

	return 0;
}
10166
/*
 * Program the 845g/865g cursor registers from @plane_state, or disable
 * the cursor when @plane_state is NULL (or invisible). All register
 * writes happen under the uncore lock so the sequence is not torn by
 * concurrent mmio access.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;

		cntl = plane_state->ctl |
			i845_cursor_ctl_crtc(crtc_state);

		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		/* Disable first, then reprogram and re-enable. */
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		/* Cache the programmed values to skip redundant rewrites. */
		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; a single write suffices. */
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
10211
/* Disable the 845g/865g cursor by programming an all-zero state. */
static void i845_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i845_update_cursor(plane, crtc_state, NULL);
}
10217
/*
 * Read out whether the 845g/865g cursor is enabled in hardware.
 * Returns false (without touching *pipe) if the pipe's power domain is
 * off; otherwise stores the owning pipe (always PIPE_A here) in *pipe
 * and returns the enable state.
 */
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;

	*pipe = PIPE_A;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
10239
/* Maximum cursor stride: 4 bytes per (ARGB) pixel times the max width. */
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return plane->base.dev->mode_config.cursor_width * 4;
}
10247
10248 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10249 {
10250         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
10251         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10252         u32 cntl = 0;
10253
10254         if (INTEL_GEN(dev_priv) >= 11)
10255                 return cntl;
10256
10257         cntl |= MCURSOR_GAMMA_ENABLE;
10258
10259         if (HAS_DDI(dev_priv))
10260                 cntl |= MCURSOR_PIPE_CSC_ENABLE;
10261
10262         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
10263                 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
10264
10265         return cntl;
10266 }
10267
10268 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
10269                            const struct intel_plane_state *plane_state)
10270 {
10271         struct drm_i915_private *dev_priv =
10272                 to_i915(plane_state->base.plane->dev);
10273         u32 cntl = 0;
10274
10275         if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
10276                 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
10277
10278         switch (plane_state->base.crtc_w) {
10279         case 64:
10280                 cntl |= MCURSOR_MODE_64_ARGB_AX;
10281                 break;
10282         case 128:
10283                 cntl |= MCURSOR_MODE_128_ARGB_AX;
10284                 break;
10285         case 256:
10286                 cntl |= MCURSOR_MODE_256_ARGB_AX;
10287                 break;
10288         default:
10289                 MISSING_CASE(plane_state->base.crtc_w);
10290                 return 0;
10291         }
10292
10293         if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
10294                 cntl |= MCURSOR_ROTATE_180;
10295
10296         return cntl;
10297 }
10298
10299 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
10300 {
10301         struct drm_i915_private *dev_priv =
10302                 to_i915(plane_state->base.plane->dev);
10303         int width = plane_state->base.crtc_w;
10304         int height = plane_state->base.crtc_h;
10305
10306         if (!intel_cursor_size_ok(plane_state))
10307                 return false;
10308
10309         /* Cursor width is limited to a few power-of-two sizes */
10310         switch (width) {
10311         case 256:
10312         case 128:
10313         case 64:
10314                 break;
10315         default:
10316                 return false;
10317         }
10318
10319         /*
10320          * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
10321          * height from 8 lines up to the cursor width, when the
10322          * cursor is not rotated. Everything else requires square
10323          * cursors.
10324          */
10325         if (HAS_CUR_FBC(dev_priv) &&
10326             plane_state->base.rotation & DRM_MODE_ROTATE_0) {
10327                 if (height < 8 || height > width)
10328                         return false;
10329         } else {
10330                 if (height != width)
10331                         return false;
10332         }
10333
10334         return true;
10335 }
10336
/*
 * i9xx+ cursor ->atomic_check(): on top of the common cursor checks,
 * validate the dimensions, the stride (must match width * cpp), and
 * the CHV pipe C left-edge erratum, then precompute the control
 * register value. Returns 0 on success or a negative errno.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	/* intel_cursor_check_surface() should have set the stride already */
	WARN_ON(plane_state->base.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0], plane_state->base.crtc_w);
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->base.visible && plane_state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
10391
/*
 * Program the i9xx+ cursor registers from @plane_state, or disable the
 * cursor when @plane_state is NULL (or invisible). The register write
 * order is load-bearing — see the comment below — and everything is
 * done under the uncore lock to keep the sequence atomic wrt other
 * mmio access.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		cntl = plane_state->ctl |
			i9xx_cursor_ctl_crtc(crtc_state);

		/* Non-square cursors need CUR_FBC_CTL to set the height. */
		if (plane_state->base.crtc_h != plane_state->base.crtc_w)
			fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always update CURCNTR before
	 * CURPOS.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
	 * except when the plane is getting enabled at which time
	 * the CURCNTR write arms the update.
	 */

	if (INTEL_GEN(dev_priv) >= 9)
		skl_write_cursor_wm(plane, crtc_state);

	/* Full reprogram only when something besides the position changed. */
	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		/* Cache the programmed values to skip redundant rewrites. */
		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
10456
/* Disable the i9xx+ cursor by programming an all-zero state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i9xx_update_cursor(plane, crtc_state, NULL);
}
10462
/*
 * Read out whether the i9xx+ cursor is enabled in hardware. Returns
 * false (without touching *pipe) if the pipe's power domain is off;
 * otherwise stores the owning pipe in *pipe and returns the enable
 * state (any non-disabled cursor mode counts as enabled).
 */
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-3 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(CURCNTR(plane->pipe));

	ret = val & MCURSOR_MODE;

	/* gen2/3 (sans g4x) encode the pipe in the cursor control reg */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		*pipe = plane->pipe;
	else
		*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
			MCURSOR_PIPE_SELECT_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
10496
/*
 * VESA 640x480x72Hz mode to set on the pipe during load detection,
 * used when no explicit mode is supplied to intel_get_load_detect_pipe().
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10502
10503 struct drm_framebuffer *
10504 intel_framebuffer_create(struct drm_i915_gem_object *obj,
10505                          struct drm_mode_fb_cmd2 *mode_cmd)
10506 {
10507         struct intel_framebuffer *intel_fb;
10508         int ret;
10509
10510         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10511         if (!intel_fb)
10512                 return ERR_PTR(-ENOMEM);
10513
10514         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
10515         if (ret)
10516                 goto err;
10517
10518         return &intel_fb->base;
10519
10520 err:
10521         kfree(intel_fb);
10522         return ERR_PTR(ret);
10523 }
10524
/*
 * Add all planes on @crtc to @state and detach them (clear their crtc
 * and fb), so that a subsequent commit turns them all off. Returns 0
 * on success or a negative errno.
 */
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
					struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int ret, i;

	ret = drm_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		/* Other crtcs' planes may be in @state too; skip them. */
		if (plane_state->crtc != crtc)
			continue;

		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret)
			return ret;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	return 0;
}
10549
/*
 * Find (or reuse) a CRTC that can drive @connector and commit a
 * load-detect modeset on it, saving the state needed to undo it in
 * @old for intel_release_load_detect_pipe().
 *
 * NOTE(review): despite the int return type, this returns true (1) on
 * success, false (0) on non-retryable failure, and -EDEADLK when the
 * caller must back off and retry the locking sequence.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       const struct drm_display_mode *mode,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		/* Skip crtcs this encoder can't be routed to. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* @state carries the load-detect config, @restore_state the undo. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	/* Load detection wants a bare pipe: no planes enabled. */
	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Snapshot the current connector/crtc/plane state for restoration. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK must be propagated so the caller can back off & retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
10707
/*
 * Undo a load-detect modeset set up by intel_get_load_detect_pipe() by
 * committing the saved restore state in @old. A NULL restore state
 * means nothing was committed, so this is a no-op then.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (!state)
		return;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
	if (ret)
		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
}
10730
10731 static int i9xx_pll_refclk(struct drm_device *dev,
10732                            const struct intel_crtc_state *pipe_config)
10733 {
10734         struct drm_i915_private *dev_priv = to_i915(dev);
10735         u32 dpll = pipe_config->dpll_hw_state.dpll;
10736
10737         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10738                 return dev_priv->vbt.lvds_ssc_freq;
10739         else if (HAS_PCH_SPLIT(dev_priv))
10740                 return 120000;
10741         else if (!IS_GEN(dev_priv, 2))
10742                 return 96000;
10743         else
10744                 return 48000;
10745 }
10746
/*
 * Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the hardware DPLL/FP register values saved in
 * pipe_config->dpll_hw_state back into m/n/p divisors and recomputes
 * the resulting port clock. The result (which still includes the
 * pixel multiplier) is stored in pipe_config->port_clock.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick whichever FP divisor register the DPLL currently selects. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		/* Pineview encodes N as a one-hot bitfield; ffs()-1 recovers it. */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* P1 is also stored one-hot on gen3+. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the programmed DPLL output mode. */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: I830 has no LVDS register at all. */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
10836
10837 int intel_dotclock_calculate(int link_freq,
10838                              const struct intel_link_m_n *m_n)
10839 {
10840         /*
10841          * The calculation for the data clock is:
10842          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10843          * But we want to avoid losing precison if possible, so:
10844          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10845          *
10846          * and the link clock is simpler:
10847          * link_clock = (m * link_clock) / n
10848          */
10849
10850         if (!m_n->link_n)
10851                 return 0;
10852
10853         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
10854 }
10855
/*
 * Read out the pipe clock for a PCH-attached pipe: port_clock comes
 * straight from the DPLL, while crtc_clock is derived from the FDI
 * M/N configuration so it is available even without active ports.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
10873
/*
 * Returns the currently programmed mode of the given encoder, read
 * back from the hardware, or NULL if the encoder is off or on any
 * allocation/readout failure. The returned mode is kzalloc()ed here,
 * so the caller owns it and must kfree() it.
 */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc_state *crtc_state;
	struct drm_display_mode *mode;
	struct intel_crtc *crtc;
	enum pipe pipe;

	/* Encoder disabled: there is no current mode to report. */
	if (!encoder->get_hw_state(encoder, &pipe))
		return NULL;

	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/* Temporary crtc_state used only to hold the hardware readout. */
	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		kfree(mode);
		return NULL;
	}

	crtc_state->base.crtc = &crtc->base;

	if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
		kfree(crtc_state);
		kfree(mode);
		return NULL;
	}

	encoder->get_config(encoder, crtc_state);

	intel_mode_from_pipe_config(mode, crtc_state);

	kfree(crtc_state);

	return mode;
}
10915
/* Free an intel_crtc; unregisters the drm_crtc before releasing memory. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(intel_crtc);
}
10923
10924 /**
10925  * intel_wm_need_update - Check whether watermarks need updating
10926  * @cur: current plane state
10927  * @new: new plane state
10928  *
10929  * Check current plane state versus the new one to determine whether
10930  * watermarks need to be recalculated.
10931  *
10932  * Returns true or false.
10933  */
10934 static bool intel_wm_need_update(struct intel_plane_state *cur,
10935                                  struct intel_plane_state *new)
10936 {
10937         /* Update watermarks on tiling or size changes. */
10938         if (new->base.visible != cur->base.visible)
10939                 return true;
10940
10941         if (!cur->base.fb || !new->base.fb)
10942                 return false;
10943
10944         if (cur->base.fb->modifier != new->base.fb->modifier ||
10945             cur->base.rotation != new->base.rotation ||
10946             drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
10947             drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
10948             drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
10949             drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
10950                 return true;
10951
10952         return false;
10953 }
10954
10955 static bool needs_scaling(const struct intel_plane_state *state)
10956 {
10957         int src_w = drm_rect_width(&state->base.src) >> 16;
10958         int src_h = drm_rect_height(&state->base.src) >> 16;
10959         int dst_w = drm_rect_width(&state->base.dst);
10960         int dst_h = drm_rect_height(&state->base.dst);
10961
10962         return (src_w != dst_w || src_h != dst_h);
10963 }
10964
/*
 * Compute the per-plane derived state for an atomic update: scaler
 * setup (gen9+), visibility transitions, and the watermark/cxsr/LP-WM
 * flags on the crtc state that the commit code later acts on.
 * Returns 0 on success or a negative errno from scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct drm_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->plane);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->base.active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret;

	/* Gen9+ non-cursor planes may need a pipe scaler allocated/freed. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->base.visible;
	visible = plane_state->visible;

	/* A plane cannot have been visible on a disabled crtc. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->visible = visible = false;
		to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
	}

	/* Plane stays invisible: nothing else to do. */
	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	/* A modeset counts as both turning off and (re)turning on. */
	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
			 intel_crtc->base.base.id, intel_crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		/* Pre-gen5 (except G4X) needs watermarks updated before enable. */
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
					to_intel_plane_state(plane_state))) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			pipe_config->update_wm_pre = true;
			pipe_config->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		pipe_config->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(to_intel_plane_state(plane_state)))))
		pipe_config->disable_lp_wm = true;

	return 0;
}
11099
11100 static bool encoders_cloneable(const struct intel_encoder *a,
11101                                const struct intel_encoder *b)
11102 {
11103         /* masks could be asymmetric, so check both ways */
11104         return a == b || (a->cloneable & (1 << b->type) &&
11105                           b->cloneable & (1 << a->type));
11106 }
11107
11108 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11109                                          struct intel_crtc *crtc,
11110                                          struct intel_encoder *encoder)
11111 {
11112         struct intel_encoder *source_encoder;
11113         struct drm_connector *connector;
11114         struct drm_connector_state *connector_state;
11115         int i;
11116
11117         for_each_new_connector_in_state(state, connector, connector_state, i) {
11118                 if (connector_state->crtc != &crtc->base)
11119                         continue;
11120
11121                 source_encoder =
11122                         to_intel_encoder(connector_state->best_encoder);
11123                 if (!encoders_cloneable(encoder, source_encoder))
11124                         return false;
11125         }
11126
11127         return true;
11128 }
11129
11130 static int icl_add_linked_planes(struct intel_atomic_state *state)
11131 {
11132         struct intel_plane *plane, *linked;
11133         struct intel_plane_state *plane_state, *linked_plane_state;
11134         int i;
11135
11136         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11137                 linked = plane_state->linked_plane;
11138
11139                 if (!linked)
11140                         continue;
11141
11142                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
11143                 if (IS_ERR(linked_plane_state))
11144                         return PTR_ERR(linked_plane_state);
11145
11146                 WARN_ON(linked_plane_state->linked_plane != plane);
11147                 WARN_ON(linked_plane_state->slave == plane_state->slave);
11148         }
11149
11150         return 0;
11151 }
11152
/*
 * On gen11+ planar (NV12-style) formats need a second "Y" plane
 * slaved to the main UV plane. Tear down any stale master/slave
 * links, then for every plane using a planar format find a free Y
 * plane on the same crtc and link it in as a slave.
 * Returns 0 on success, -EINVAL if no free Y plane is available, or
 * an errno from pulling the slave's state into the atomic state.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	/* Plane linking only exists on gen11+. */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
			continue;

		plane_state->linked_plane = NULL;
		/* An invisible ex-slave must be deactivated and reprogrammed. */
		if (plane_state->slave && !plane_state->base.visible) {
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->slave = false;
	}

	/* No planar formats in use: nothing left to link. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find the first inactive plane capable of acting as Y plane. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
				      hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		/* Wire up the mutual master/slave link and activate the slave. */
		plane_state->linked_plane = linked;

		linked_state->slave = true;
		linked_state->linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
	}

	return 0;
}
11224
/*
 * Per-crtc atomic check hook: computes clocks, color management,
 * watermarks and (gen9+) scaler/linked-plane state for the new crtc
 * state. Returns 0 on success or a negative errno.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	/* Pre-gen5 (except G4X) needs a post-update wm pass when disabling. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	/* A modeset on an enabled crtc must compute fresh PLL state. */
	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (mode_changed || crtc_state->color_mgmt_changed) {
		ret = intel_color_check(pipe_config);
		if (ret)
			return ret;

		/*
		 * Changing color management on Intel hardware is
		 * handled as part of planes update.
		 */
		crtc_state->planes_changed = true;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		/* Intermediate wms only make sense with target wms computed. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || pipe_config->update_pipe)
			ret = skl_update_scaler_crtc(pipe_config);

		/* Each step below only runs if the previous one succeeded. */
		if (!ret)
			ret = icl_check_nv12_planes(pipe_config);
		if (!ret)
			ret = skl_check_pipe_max_pixel_rate(intel_crtc,
							    pipe_config);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
							 pipe_config);
	}

	if (HAS_IPS(dev_priv))
		pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);

	return ret;
}
11304
/* CRTC helper vtable: only the atomic check hook is needed here. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.atomic_check = intel_crtc_atomic_check,
};
11308
/*
 * Resynchronize every connector's atomic state (best_encoder/crtc and
 * the matching reference count) with the legacy connector->encoder
 * routing, e.g. after hardware state readout.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference held for the crtc recorded so far. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Take a reference for the newly recorded crtc. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
11333
11334 static int
11335 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
11336                       struct intel_crtc_state *pipe_config)
11337 {
11338         struct drm_connector *connector = conn_state->connector;
11339         const struct drm_display_info *info = &connector->display_info;
11340         int bpp;
11341
11342         switch (conn_state->max_bpc) {
11343         case 6 ... 7:
11344                 bpp = 6 * 3;
11345                 break;
11346         case 8 ... 9:
11347                 bpp = 8 * 3;
11348                 break;
11349         case 10 ... 11:
11350                 bpp = 10 * 3;
11351                 break;
11352         case 12:
11353                 bpp = 12 * 3;
11354                 break;
11355         default:
11356                 return -EINVAL;
11357         }
11358
11359         if (bpp < pipe_config->pipe_bpp) {
11360                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
11361                               "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
11362                               connector->base.id, connector->name,
11363                               bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
11364                               pipe_config->pipe_bpp);
11365
11366                 pipe_config->pipe_bpp = bpp;
11367         }
11368
11369         return 0;
11370 }
11371
11372 static int
11373 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11374                           struct intel_crtc_state *pipe_config)
11375 {
11376         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11377         struct drm_atomic_state *state = pipe_config->base.state;
11378         struct drm_connector *connector;
11379         struct drm_connector_state *connector_state;
11380         int bpp, i;
11381
11382         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11383             IS_CHERRYVIEW(dev_priv)))
11384                 bpp = 10*3;
11385         else if (INTEL_GEN(dev_priv) >= 5)
11386                 bpp = 12*3;
11387         else
11388                 bpp = 8*3;
11389
11390         pipe_config->pipe_bpp = bpp;
11391
11392         /* Clamp display bpp to connector max bpp */
11393         for_each_new_connector_in_state(state, connector, connector_state, i) {
11394                 int ret;
11395
11396                 if (connector_state->crtc != &crtc->base)
11397                         continue;
11398
11399                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
11400                 if (ret)
11401                         return ret;
11402         }
11403
11404         return 0;
11405 }
11406
/* Dump the hardware crtc_* timing fields of a mode to the debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
			"type: 0x%x flags: 0x%x\n",
		mode->crtc_clock,
		mode->crtc_hdisplay, mode->crtc_hsync_start,
		mode->crtc_hsync_end, mode->crtc_htotal,
		mode->crtc_vdisplay, mode->crtc_vsync_start,
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
11417
/* Dump one set of link M/N values (tagged by @id) to the debug log. */
static inline void
intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
		      unsigned int lane_count, struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}
11427
/* Expand INTEL_OUTPUT_FOO into an [index] = "FOO" table entry. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Printable names for each intel_output_type, indexed by enum value. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
11446
/*
 * Format an output_types bitmask as a comma-separated list of type
 * names into @buf (at most @len bytes, always NUL-terminated).
 * Stops early if the buffer fills up; warns once if bits remain that
 * have no name in output_type_str.
 */
static void snprintf_output_types(char *buf, size_t len,
				  unsigned int output_types)
{
	char *str = buf;
	int i;

	str[0] = '\0';

	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
		int r;

		if ((output_types & BIT(i)) == 0)
			continue;

		/* Prefix a comma for every entry after the first. */
		r = snprintf(str, len, "%s%s",
			     str != buf ? "," : "", output_type_str[i]);
		/* r >= len means snprintf truncated: no room for more. */
		if (r >= len)
			break;
		str += r;
		len -= r;

		output_types &= ~BIT(i);
	}

	WARN_ON_ONCE(output_types != 0);
}
11473
/* Printable names for each intel_output_format, indexed by enum value. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
11480
11481 static const char *output_formats(enum intel_output_format format)
11482 {
11483         if (format >= ARRAY_SIZE(output_format_str))
11484                 format = INTEL_OUTPUT_FORMAT_INVALID;
11485         return output_format_str[format];
11486 }
11487
/*
 * Dump @pipe_config to the kernel debug log (DRM_DEBUG_KMS).
 *
 * Purely diagnostic: prints output types/format, transcoder, FDI and DP
 * link M/N values, mode timings, scaler and panel fitter state, DPLL hw
 * state and the state of every plane on @crtc's pipe. Modifies nothing.
 *
 * @context: free-form string appended to the CRTC identification line.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config,
                                   const char *context)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_plane *plane;
        struct intel_plane *intel_plane;
        struct intel_plane_state *state;
        struct drm_framebuffer *fb;
        char buf[64];   /* scratch for snprintf_output_types() */

        DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
                      crtc->base.base.id, crtc->base.name, context);

        snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
        DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
                      buf, pipe_config->output_types);

        DRM_DEBUG_KMS("output format: %s\n",
                      output_formats(pipe_config->output_format));

        DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
                      transcoder_name(pipe_config->cpu_transcoder),
                      pipe_config->pipe_bpp, pipe_config->dither);

        if (pipe_config->has_pch_encoder)
                intel_dump_m_n_config(pipe_config, "fdi",
                                      pipe_config->fdi_lanes,
                                      &pipe_config->fdi_m_n);

        if (intel_crtc_has_dp_encoder(pipe_config)) {
                intel_dump_m_n_config(pipe_config, "dp m_n",
                                pipe_config->lane_count, &pipe_config->dp_m_n);
                /* second M/N set is only used with DRRS */
                if (pipe_config->has_drrs)
                        intel_dump_m_n_config(pipe_config, "dp m2_n2",
                                              pipe_config->lane_count,
                                              &pipe_config->dp_m2_n2);
        }

        DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
                      pipe_config->has_audio, pipe_config->has_infoframe);

        DRM_DEBUG_KMS("requested mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->base.mode);
        DRM_DEBUG_KMS("adjusted mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
        intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
        DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
                      pipe_config->port_clock,
                      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
                      pipe_config->pixel_rate);

        /* pipe scalers only exist on gen9+ */
        if (INTEL_GEN(dev_priv) >= 9)
                DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
                              crtc->num_scalers,
                              pipe_config->scaler_state.scaler_users,
                              pipe_config->scaler_state.scaler_id);

        /* GMCH platforms use the pipe panel fitter, the rest the PCH one */
        if (HAS_GMCH(dev_priv))
                DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
                              pipe_config->gmch_pfit.control,
                              pipe_config->gmch_pfit.pgm_ratios,
                              pipe_config->gmch_pfit.lvds_border_bits);
        else
                DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
                              pipe_config->pch_pfit.pos,
                              pipe_config->pch_pfit.size,
                              enableddisabled(pipe_config->pch_pfit.enabled));

        DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
                      pipe_config->ips_enabled, pipe_config->double_wide);

        intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

        DRM_DEBUG_KMS("planes on this crtc\n");
        list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
                struct drm_format_name_buf format_name;
                intel_plane = to_intel_plane(plane);
                /* only planes attached to this CRTC's pipe are of interest */
                if (intel_plane->pipe != crtc->pipe)
                        continue;

                state = to_intel_plane_state(plane->state);
                fb = state->base.fb;
                if (!fb) {
                        DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
                                      plane->base.id, plane->name, state->scaler_id);
                        continue;
                }

                DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
                              plane->base.id, plane->name,
                              fb->base.id, fb->width, fb->height,
                              drm_get_format_name(fb->format->format, &format_name));
                if (INTEL_GEN(dev_priv) >= 9)
                        /* src coordinates are 16.16 fixed point */
                        DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
                                      state->scaler_id,
                                      state->base.src.x1 >> 16,
                                      state->base.src.y1 >> 16,
                                      drm_rect_width(&state->base.src) >> 16,
                                      drm_rect_height(&state->base.src) >> 16,
                                      state->base.dst.x1, state->base.dst.y1,
                                      drm_rect_width(&state->base.dst),
                                      drm_rect_height(&state->base.dst));
        }
}
11594
11595 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
11596 {
11597         struct drm_device *dev = state->dev;
11598         struct drm_connector *connector;
11599         struct drm_connector_list_iter conn_iter;
11600         unsigned int used_ports = 0;
11601         unsigned int used_mst_ports = 0;
11602         bool ret = true;
11603
11604         /*
11605          * Walk the connector list instead of the encoder
11606          * list to detect the problem on ddi platforms
11607          * where there's just one encoder per digital port.
11608          */
11609         drm_connector_list_iter_begin(dev, &conn_iter);
11610         drm_for_each_connector_iter(connector, &conn_iter) {
11611                 struct drm_connector_state *connector_state;
11612                 struct intel_encoder *encoder;
11613
11614                 connector_state = drm_atomic_get_new_connector_state(state, connector);
11615                 if (!connector_state)
11616                         connector_state = connector->state;
11617
11618                 if (!connector_state->best_encoder)
11619                         continue;
11620
11621                 encoder = to_intel_encoder(connector_state->best_encoder);
11622
11623                 WARN_ON(!connector_state->crtc);
11624
11625                 switch (encoder->type) {
11626                         unsigned int port_mask;
11627                 case INTEL_OUTPUT_DDI:
11628                         if (WARN_ON(!HAS_DDI(to_i915(dev))))
11629                                 break;
11630                         /* else: fall through */
11631                 case INTEL_OUTPUT_DP:
11632                 case INTEL_OUTPUT_HDMI:
11633                 case INTEL_OUTPUT_EDP:
11634                         port_mask = 1 << encoder->port;
11635
11636                         /* the same port mustn't appear more than once */
11637                         if (used_ports & port_mask)
11638                                 ret = false;
11639
11640                         used_ports |= port_mask;
11641                         break;
11642                 case INTEL_OUTPUT_DP_MST:
11643                         used_mst_ports |=
11644                                 1 << encoder->port;
11645                         break;
11646                 default:
11647                         break;
11648                 }
11649         }
11650         drm_connector_list_iter_end(&conn_iter);
11651
11652         /* can't mix MST and SST/HDMI on the same port */
11653         if (used_ports & used_mst_ports)
11654                 return false;
11655
11656         return ret;
11657 }
11658
/*
 * Zero the i915-specific portion of @crtc_state while leaving the base
 * drm_crtc_state and a small whitelist of fields intact (scaler state,
 * DPLL selection and hw state, pch_pfit.force_thru, ips_force_disable,
 * and the watermarks on G4X/VLV/CHV).
 *
 * Returns 0 on success, -ENOMEM if the scratch copy can't be allocated.
 */
static int
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(crtc_state->base.crtc->dev);
        struct intel_crtc_state *saved_state;

        /* zeroed scratch area to stash the fields we want to keep */
        saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
        if (!saved_state)
                return -ENOMEM;

        /* FIXME: before the switch to atomic started, a new pipe_config was
         * kzalloc'd. Code that depends on any field being zero should be
         * fixed, so that the crtc_state can be safely duplicated. For now,
         * only fields that are know to not cause problems are preserved. */

        saved_state->scaler_state = crtc_state->scaler_state;
        saved_state->shared_dpll = crtc_state->shared_dpll;
        saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
        saved_state->pch_pfit.force_thru = crtc_state->pch_pfit.force_thru;
        saved_state->ips_force_disable = crtc_state->ips_force_disable;
        if (IS_G4X(dev_priv) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                saved_state->wm = crtc_state->wm;

        /* Keep base drm_crtc_state intact, only clear our extended struct */
        BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
        /* saved_state was kzalloc'd, so everything not stashed above is 0 */
        memcpy(&crtc_state->base + 1, &saved_state->base + 1,
               sizeof(*crtc_state) - sizeof(crtc_state->base));

        kfree(saved_state);
        return 0;
}
11692
/*
 * Compute the full pipe configuration for @crtc from the requested mode
 * and the connectors assigned to it in @pipe_config->base.state.
 *
 * Flow: reset the i915-specific state, sanitize sync polarity flags,
 * compute the baseline pipe bpp, collect output_types, then let each
 * encoder's ->compute_config() and intel_crtc_compute_config() adjust
 * the config; one retry round is allowed if the CRTC fixup asks for it.
 *
 * Returns 0 on success, -EDEADLK on atomic lock contention, or another
 * negative errno on failure.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
                          struct intel_crtc_state *pipe_config)
{
        struct drm_atomic_state *state = pipe_config->base.state;
        struct intel_encoder *encoder;
        struct drm_connector *connector;
        struct drm_connector_state *connector_state;
        int base_bpp, ret;
        int i;
        bool retry = true;      /* permit exactly one RETRY round trip */

        ret = clear_intel_crtc_state(pipe_config);
        if (ret)
                return ret;

        /* transcoder defaults to the pipe; encoders may override (e.g. eDP) */
        pipe_config->cpu_transcoder =
                (enum transcoder) to_intel_crtc(crtc)->pipe;

        /*
         * Sanitize sync polarity flags based on requested ones. If neither
         * positive or negative polarity is requested, treat this as meaning
         * negative polarity.
         */
        if (!(pipe_config->base.adjusted_mode.flags &
              (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
                pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

        if (!(pipe_config->base.adjusted_mode.flags &
              (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
                pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

        ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
                                        pipe_config);
        if (ret)
                return ret;

        /* remembered only for the debug message at the end */
        base_bpp = pipe_config->pipe_bpp;

        /*
         * Determine the real pipe dimensions. Note that stereo modes can
         * increase the actual pipe size due to the frame doubling and
         * insertion of additional space for blanks between the frame. This
         * is stored in the crtc timings. We use the requested mode to do this
         * computation to clearly distinguish it from the adjusted mode, which
         * can be changed by the connectors in the below retry loop.
         */
        drm_mode_get_hv_timing(&pipe_config->base.mode,
                               &pipe_config->pipe_src_w,
                               &pipe_config->pipe_src_h);

        for_each_new_connector_in_state(state, connector, connector_state, i) {
                if (connector_state->crtc != crtc)
                        continue;

                encoder = to_intel_encoder(connector_state->best_encoder);

                if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
                        DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
                        return -EINVAL;
                }

                /*
                 * Determine output_types before calling the .compute_config()
                 * hooks so that the hooks can use this information safely.
                 */
                if (encoder->compute_output_type)
                        pipe_config->output_types |=
                                BIT(encoder->compute_output_type(encoder, pipe_config,
                                                                 connector_state));
                else
                        pipe_config->output_types |= BIT(encoder->type);
        }

encoder_retry:
        /* Ensure the port clock defaults are reset when retrying. */
        pipe_config->port_clock = 0;
        pipe_config->pixel_multiplier = 1;

        /* Fill in default crtc timings, allow encoders to overwrite them. */
        drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
                              CRTC_STEREO_DOUBLE);

        /* Pass our mode to the connectors and the CRTC to give them a chance to
         * adjust it according to limitations or connector properties, and also
         * a chance to reject the mode entirely.
         */
        for_each_new_connector_in_state(state, connector, connector_state, i) {
                if (connector_state->crtc != crtc)
                        continue;

                encoder = to_intel_encoder(connector_state->best_encoder);
                ret = encoder->compute_config(encoder, pipe_config,
                                              connector_state);
                if (ret < 0) {
                        if (ret != -EDEADLK)
                                DRM_DEBUG_KMS("Encoder config failure: %d\n",
                                              ret);
                        return ret;
                }
        }

        /* Set default port clock if not overwritten by the encoder. Needs to be
         * done afterwards in case the encoder adjusts the mode. */
        if (!pipe_config->port_clock)
                pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
                        * pipe_config->pixel_multiplier;

        ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
        if (ret == -EDEADLK)
                return ret;
        if (ret < 0) {
                DRM_DEBUG_KMS("CRTC fixup failed\n");
                return ret;
        }

        if (ret == RETRY) {
                /* a second RETRY would loop forever - give up instead */
                if (WARN(!retry, "loop in pipe configuration computation\n"))
                        return -EINVAL;

                DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
                retry = false;
                goto encoder_retry;
        }

        /* Dithering seems to not pass-through bits correctly when it should, so
         * only enable it on 6bpc panels and when its not a compliance
         * test requesting 6bpc video pattern.
         */
        pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
                !pipe_config->dither_force_disable;
        DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
                      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

        return 0;
}
11829
/*
 * Compare two clocks with ~5% tolerance (integer arithmetic; a zero
 * clock only matches another zero clock).
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
        int sum, scaled;

        if (clock1 == clock2)
                return true;

        if (!clock1 || !clock2)
                return false;

        sum = clock1 + clock2;
        scaled = (abs(clock1 - clock2) + sum) * 100 / sum;

        return scaled < 105;
}
11847
11848 static bool
11849 intel_compare_m_n(unsigned int m, unsigned int n,
11850                   unsigned int m2, unsigned int n2,
11851                   bool exact)
11852 {
11853         if (m == m2 && n == n2)
11854                 return true;
11855
11856         if (exact || !m || !n || !m2 || !n2)
11857                 return false;
11858
11859         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
11860
11861         if (n > n2) {
11862                 while (n > n2) {
11863                         m2 <<= 1;
11864                         n2 <<= 1;
11865                 }
11866         } else if (n < n2) {
11867                 while (n < n2) {
11868                         m <<= 1;
11869                         n <<= 1;
11870                 }
11871         }
11872
11873         if (n != n2)
11874                 return false;
11875
11876         return intel_fuzzy_clock_check(m, m2);
11877 }
11878
11879 static bool
11880 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
11881                        struct intel_link_m_n *m2_n2,
11882                        bool adjust)
11883 {
11884         if (m_n->tu == m2_n2->tu &&
11885             intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
11886                               m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
11887             intel_compare_m_n(m_n->link_m, m_n->link_n,
11888                               m2_n2->link_m, m2_n2->link_n, !adjust)) {
11889                 if (adjust)
11890                         *m2_n2 = *m_n;
11891
11892                 return true;
11893         }
11894
11895         return false;
11896 }
11897
11898 static void __printf(3, 4)
11899 pipe_config_err(bool adjust, const char *name, const char *format, ...)
11900 {
11901         struct va_format vaf;
11902         va_list args;
11903
11904         va_start(args, format);
11905         vaf.fmt = format;
11906         vaf.va = &args;
11907
11908         if (adjust)
11909                 drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
11910         else
11911                 drm_err("mismatch in %s %pV", name, &vaf);
11912
11913         va_end(args);
11914 }
11915
11916 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
11917 {
11918         if (i915_modparams.fastboot != -1)
11919                 return i915_modparams.fastboot;
11920
11921         /* Enable fastboot by default on Skylake and newer */
11922         if (INTEL_GEN(dev_priv) >= 9)
11923                 return true;
11924
11925         /* Enable fastboot by default on VLV and CHV */
11926         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
11927                 return true;
11928
11929         /* Disabled by default on all others */
11930         return false;
11931 }
11932
/*
 * Compare two CRTC states field by field (typically the sw-computed
 * state against the state read back from hardware).
 *
 * @adjust: true during fastset evaluation - mismatches are logged at
 * debug level via pipe_config_err() and link M/N values may be copied
 * into @pipe_config rather than failing; false during the full state
 * check, where every mismatch is an error.
 *
 * Returns true if the states match closely enough.
 */
static bool
intel_pipe_config_compare(struct drm_i915_private *dev_priv,
                          struct intel_crtc_state *current_config,
                          struct intel_crtc_state *pipe_config,
                          bool adjust)
{
        bool ret = true;
        /* true when taking over state inherited from the BIOS */
        bool fixup_inherited = adjust &&
                (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
                !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);

        if (fixup_inherited && !fastboot_enabled(dev_priv)) {
                DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
                ret = false;
        }

/* Field-compare helper macros; all are #undef'd at the end of this function. */
#define PIPE_CONF_CHECK_X(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_err(adjust, __stringify(name), \
                          "(expected 0x%08x, found 0x%08x)\n", \
                          current_config->name, \
                          pipe_config->name); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_I(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_err(adjust, __stringify(name), \
                          "(expected %i, found %i)\n", \
                          current_config->name, \
                          pipe_config->name); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_BOOL(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_err(adjust, __stringify(name), \
                          "(expected %s, found %s)\n", \
                          yesno(current_config->name), \
                          yesno(pipe_config->name)); \
                ret = false; \
        } \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
        if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
                PIPE_CONF_CHECK_BOOL(name); \
        } else { \
                pipe_config_err(adjust, __stringify(name), \
                          "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
                          yesno(current_config->name), \
                          yesno(pipe_config->name)); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_P(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_err(adjust, __stringify(name), \
                          "(expected %p, found %p)\n", \
                          current_config->name, \
                          pipe_config->name); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_M_N(name) do { \
        if (!intel_compare_link_m_n(&current_config->name, \
                                    &pipe_config->name,\
                                    adjust)) { \
                pipe_config_err(adjust, __stringify(name), \
                          "(expected tu %i gmch %i/%i link %i/%i, " \
                          "found tu %i, gmch %i/%i link %i/%i)\n", \
                          current_config->name.tu, \
                          current_config->name.gmch_m, \
                          current_config->name.gmch_n, \
                          current_config->name.link_m, \
                          current_config->name.link_n, \
                          pipe_config->name.tu, \
                          pipe_config->name.gmch_m, \
                          pipe_config->name.gmch_n, \
                          pipe_config->name.link_m, \
                          pipe_config->name.link_n); \
                ret = false; \
        } \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
        if (!intel_compare_link_m_n(&current_config->name, \
                                    &pipe_config->name, adjust) && \
            !intel_compare_link_m_n(&current_config->alt_name, \
                                    &pipe_config->name, adjust)) { \
                pipe_config_err(adjust, __stringify(name), \
                          "(expected tu %i gmch %i/%i link %i/%i, " \
                          "or tu %i gmch %i/%i link %i/%i, " \
                          "found tu %i, gmch %i/%i link %i/%i)\n", \
                          current_config->name.tu, \
                          current_config->name.gmch_m, \
                          current_config->name.gmch_n, \
                          current_config->name.link_m, \
                          current_config->name.link_n, \
                          current_config->alt_name.tu, \
                          current_config->alt_name.gmch_m, \
                          current_config->alt_name.gmch_n, \
                          current_config->alt_name.link_m, \
                          current_config->alt_name.link_n, \
                          pipe_config->name.tu, \
                          pipe_config->name.gmch_m, \
                          pipe_config->name.gmch_n, \
                          pipe_config->name.link_m, \
                          pipe_config->name.link_n); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
        if ((current_config->name ^ pipe_config->name) & (mask)) { \
                pipe_config_err(adjust, __stringify(name), \
                          "(%x) (expected %i, found %i)\n", \
                          (mask), \
                          current_config->name & (mask), \
                          pipe_config->name & (mask)); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
        if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
                pipe_config_err(adjust, __stringify(name), \
                          "(expected %i, found %i)\n", \
                          current_config->name, \
                          pipe_config->name); \
                ret = false; \
        } \
} while (0)

#define PIPE_CONF_QUIRK(quirk)  \
        ((current_config->quirks | pipe_config->quirks) & (quirk))

        PIPE_CONF_CHECK_I(cpu_transcoder);

        PIPE_CONF_CHECK_BOOL(has_pch_encoder);
        PIPE_CONF_CHECK_I(fdi_lanes);
        PIPE_CONF_CHECK_M_N(fdi_m_n);

        PIPE_CONF_CHECK_I(lane_count);
        PIPE_CONF_CHECK_X(lane_lat_optim_mask);

        /* pre-BDW has separate M/N register sets; BDW+ has only one */
        if (INTEL_GEN(dev_priv) < 8) {
                PIPE_CONF_CHECK_M_N(dp_m_n);

                if (current_config->has_drrs)
                        PIPE_CONF_CHECK_M_N(dp_m2_n2);
        } else
                PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

        PIPE_CONF_CHECK_X(output_types);

        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);

        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);

        PIPE_CONF_CHECK_I(pixel_multiplier);
        PIPE_CONF_CHECK_I(output_format);
        PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
        if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                PIPE_CONF_CHECK_BOOL(limited_color_range);

        PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
        PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
        PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);

        PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

        PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
                              DRM_MODE_FLAG_INTERLACE);

        if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
                PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
                                      DRM_MODE_FLAG_PHSYNC);
                PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
                                      DRM_MODE_FLAG_NHSYNC);
                PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
                                      DRM_MODE_FLAG_PVSYNC);
                PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
                                      DRM_MODE_FLAG_NVSYNC);
        }

        PIPE_CONF_CHECK_X(gmch_pfit.control);
        /* pfit ratios are autocomputed by the hw on gen4+ */
        if (INTEL_GEN(dev_priv) < 4)
                PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
        PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

        /* these are skipped for fastsets, only the full check cares */
        if (!adjust) {
                PIPE_CONF_CHECK_I(pipe_src_w);
                PIPE_CONF_CHECK_I(pipe_src_h);

                PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
                if (current_config->pch_pfit.enabled) {
                        PIPE_CONF_CHECK_X(pch_pfit.pos);
                        PIPE_CONF_CHECK_X(pch_pfit.size);
                }

                PIPE_CONF_CHECK_I(scaler_state.scaler_id);
                PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
        }

        PIPE_CONF_CHECK_BOOL(double_wide);

        PIPE_CONF_CHECK_P(shared_dpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
        PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.spll);
        PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
        PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
        PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
        PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);

        PIPE_CONF_CHECK_X(dsi_pll.ctrl);
        PIPE_CONF_CHECK_X(dsi_pll.div);

        if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
                PIPE_CONF_CHECK_I(pipe_bpp);

        PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
        PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

        PIPE_CONF_CHECK_I(min_voltage_level);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK

        return ret;
}
12221
12222 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12223                                            const struct intel_crtc_state *pipe_config)
12224 {
12225         if (pipe_config->has_pch_encoder) {
12226                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12227                                                             &pipe_config->fdi_m_n);
12228                 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12229
12230                 /*
12231                  * FDI already provided one idea for the dotclock.
12232                  * Yell if the encoder disagrees.
12233                  */
12234                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12235                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12236                      fdi_dotclock, dotclock);
12237         }
12238 }
12239
/*
 * verify_wm_state - cross-check SKL+ watermark/DDB software state against hw
 * @crtc: the crtc to verify
 * @new_state: the just-committed crtc state
 *
 * Reads the pipe's watermark levels and DDB allocations back from the
 * hardware and compares them against the values computed during the atomic
 * check phase, reporting any mismatch via DRM_ERROR.  Only meaningful on
 * gen9+ (SKL-style watermarks) and only for active pipes.
 */
static void verify_wm_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *new_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct skl_pipe_wm hw_wm, *sw_wm;
	struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
	struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const enum pipe pipe = intel_crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	/* SKL-style watermarks exist only on gen9+; idle pipes have none. */
	if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
		return;

	/* Hardware readback vs. the sw state computed at check time. */
	skl_pipe_wm_get_hw_state(intel_crtc, &hw_wm);
	sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;

	skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* DBUF slice tracking only exists on gen11+. */
	if (INTEL_GEN(dev_priv) >= 11)
		if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
			DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
				  sw_ddb->enabled_slices,
				  hw_ddb.enabled_slices);
	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		hw_plane_wm = &hw_wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks: every enabled level must match exactly. */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1, level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		/* Transition watermark is checked separately from the levels. */
		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1,
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB: compare the Y/packed plane allocation only. */
		hw_ddb_entry = &hw_ddb_y[plane];
		sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated its ddb
	 * allocation. In that case since the ddb allocation will be updated
	 * once the plane becomes visible, we can skip this check.
	 *
	 * NOTE(review): the guard is an unconditional "if (1)" even though
	 * the comment above describes a condition -- looks like the actual
	 * visibility check was removed or never wired up; confirm intent.
	 */
	if (1) {
		hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe),
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
		sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe),
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}
}
12365
12366 static void
12367 verify_connector_state(struct drm_device *dev,
12368                        struct drm_atomic_state *state,
12369                        struct drm_crtc *crtc)
12370 {
12371         struct drm_connector *connector;
12372         struct drm_connector_state *new_conn_state;
12373         int i;
12374
12375         for_each_new_connector_in_state(state, connector, new_conn_state, i) {
12376                 struct drm_encoder *encoder = connector->encoder;
12377                 struct drm_crtc_state *crtc_state = NULL;
12378
12379                 if (new_conn_state->crtc != crtc)
12380                         continue;
12381
12382                 if (crtc)
12383                         crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
12384
12385                 intel_connector_verify_state(crtc_state, new_conn_state);
12386
12387                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
12388                      "connector's atomic encoder doesn't match legacy encoder\n");
12389         }
12390 }
12391
/*
 * verify_encoder_state - cross-check encoder state against the connectors
 * @dev: drm device
 * @state: the atomic state being committed
 *
 * For every encoder, scan the old and new connector states: an encoder is
 * "found" if any connector referenced it before or after the commit, and
 * "enabled" if a connector references it in the new state.  Warn if the
 * legacy crtc pointer disagrees, and if a detached encoder still reports
 * itself active in hardware.
 */
static void
verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false, found = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_oldnew_connector_in_state(state, connector, old_conn_state,
						   new_conn_state, i) {
			/* Referenced by the old state: worth checking below. */
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			/* Referenced by the new state: must be enabled. */
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* Encoders untouched by this commit are not checked. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			/* Detached encoders must be off in hardware too. */
			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
12440
/*
 * verify_crtc_state - compare committed crtc state against hardware
 * @crtc: the crtc to verify
 * @old_crtc_state: the pre-commit state (its memory is reused as scratch)
 * @new_crtc_state: the state that was just committed
 *
 * Reads the pipe configuration back from hardware into the (destroyed and
 * zeroed) old state's memory, then compares it against the committed sw
 * state via intel_pipe_config_compare(), dumping both configs on mismatch.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	/*
	 * Destroy the old state and recycle its memory as the scratch
	 * buffer the hw state is read back into; only crtc and state
	 * back-pointers are preserved.
	 */
	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	/* Each encoder on the crtc must agree on active state and pipe. */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Let active encoders fill in their part of the hw config. */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* Inactive crtcs have nothing further to compare. */
	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	sw_config = to_intel_crtc_state(new_crtc_state);
	if (!intel_pipe_config_compare(dev_priv, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}
12510
12511 static void
12512 intel_verify_planes(struct intel_atomic_state *state)
12513 {
12514         struct intel_plane *plane;
12515         const struct intel_plane_state *plane_state;
12516         int i;
12517
12518         for_each_new_intel_plane_in_state(state, plane,
12519                                           plane_state, i)
12520                 assert_plane(plane, plane_state->base.visible);
12521 }
12522
/*
 * verify_single_dpll_state - cross-check one shared DPLL against hardware
 * @dev_priv: i915 device
 * @pll: the shared DPLL to verify
 * @crtc: a crtc expected (or not) to use the pll, or NULL for a global check
 * @new_state: @crtc's new state, or NULL when @crtc is NULL
 *
 * Reads the pll's hardware state and warns on any disagreement with the
 * software tracking: on/off state, active/reference crtc masks, and the
 * cached hw register state.  With @crtc == NULL only the global invariant
 * (no active users beyond the reference mask) is checked.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *new_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->info->name);

	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls legitimately run without any active crtc. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* Global check: active users must be a subset of references. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(crtc);

	/*
	 * NOTE(review): pipe_name(drm_crtc_index(crtc)) assumes the crtc
	 * index equals the pipe enum value -- appears to hold for i915's
	 * crtc registration order, but confirm.
	 */
	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* Cached hw state must match what the hardware reports. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
12577
12578 static void
12579 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12580                          struct drm_crtc_state *old_crtc_state,
12581                          struct drm_crtc_state *new_crtc_state)
12582 {
12583         struct drm_i915_private *dev_priv = to_i915(dev);
12584         struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
12585         struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
12586
12587         if (new_state->shared_dpll)
12588                 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
12589
12590         if (old_state->shared_dpll &&
12591             old_state->shared_dpll != new_state->shared_dpll) {
12592                 unsigned int crtc_mask = drm_crtc_mask(crtc);
12593                 struct intel_shared_dpll *pll = old_state->shared_dpll;
12594
12595                 I915_STATE_WARN(pll->active_mask & crtc_mask,
12596                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
12597                                 pipe_name(drm_crtc_index(crtc)));
12598                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
12599                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
12600                                 pipe_name(drm_crtc_index(crtc)));
12601         }
12602 }
12603
12604 static void
12605 intel_modeset_verify_crtc(struct drm_crtc *crtc,
12606                           struct drm_atomic_state *state,
12607                           struct drm_crtc_state *old_state,
12608                           struct drm_crtc_state *new_state)
12609 {
12610         if (!needs_modeset(new_state) &&
12611             !to_intel_crtc_state(new_state)->update_pipe)
12612                 return;
12613
12614         verify_wm_state(crtc, new_state);
12615         verify_connector_state(crtc->dev, state, crtc);
12616         verify_crtc_state(crtc, old_state, new_state);
12617         verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
12618 }
12619
12620 static void
12621 verify_disabled_dpll_state(struct drm_device *dev)
12622 {
12623         struct drm_i915_private *dev_priv = to_i915(dev);
12624         int i;
12625
12626         for (i = 0; i < dev_priv->num_shared_dpll; i++)
12627                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
12628 }
12629
/*
 * Verify hardware state for everything not tied to a specific crtc:
 * encoder routing, connectors bound to no crtc, and all shared DPLLs.
 */
static void
intel_modeset_verify_disabled(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	verify_encoder_state(dev, state);
	verify_connector_state(dev, state, NULL);
	verify_disabled_dpll_state(dev);
}
12638
12639 static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
12640 {
12641         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
12642         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12643
12644         /*
12645          * The scanline counter increments at the leading edge of hsync.
12646          *
12647          * On most platforms it starts counting from vtotal-1 on the
12648          * first active line. That means the scanline counter value is
12649          * always one less than what we would expect. Ie. just after
12650          * start of vblank, which also occurs at start of hsync (on the
12651          * last active line), the scanline counter will read vblank_start-1.
12652          *
12653          * On gen2 the scanline counter starts counting from 1 instead
12654          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12655          * to keep the value positive), instead of adding one.
12656          *
12657          * On HSW+ the behaviour of the scanline counter depends on the output
12658          * type. For DP ports it behaves like most other platforms, but on HDMI
12659          * there's an extra 1 line difference. So we need to add two instead of
12660          * one to the value.
12661          *
12662          * On VLV/CHV DSI the scanline counter would appear to increment
12663          * approx. 1/3 of a scanline before start of vblank. Unfortunately
12664          * that means we can't tell whether we're in vblank or not while
12665          * we're on that particular line. We must still set scanline_offset
12666          * to 1 so that the vblank timestamps come out correct when we query
12667          * the scanline counter from within the vblank interrupt handler.
12668          * However if queried just before the start of vblank we'll get an
12669          * answer that's slightly in the future.
12670          */
12671         if (IS_GEN(dev_priv, 2)) {
12672                 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
12673                 int vtotal;
12674
12675                 vtotal = adjusted_mode->crtc_vtotal;
12676                 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
12677                         vtotal /= 2;
12678
12679                 crtc->scanline_offset = vtotal - 1;
12680         } else if (HAS_DDI(dev_priv) &&
12681                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
12682                 crtc->scanline_offset = 2;
12683         } else
12684                 crtc->scanline_offset = 1;
12685 }
12686
12687 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
12688 {
12689         struct drm_device *dev = state->dev;
12690         struct drm_i915_private *dev_priv = to_i915(dev);
12691         struct drm_crtc *crtc;
12692         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12693         int i;
12694
12695         if (!dev_priv->display.crtc_compute_clock)
12696                 return;
12697
12698         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12699                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12700                 struct intel_shared_dpll *old_dpll =
12701                         to_intel_crtc_state(old_crtc_state)->shared_dpll;
12702
12703                 if (!needs_modeset(new_crtc_state))
12704                         continue;
12705
12706                 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
12707
12708                 if (!old_dpll)
12709                         continue;
12710
12711                 intel_release_shared_dpll(old_dpll, intel_crtc, state);
12712         }
12713 }
12714
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		/* Remember the first two crtcs being enabled by this modeset. */
		if (first_crtc_state) {
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		/* Pulls the crtc into the state, taking its lock. */
		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		/* Only count crtcs that stay enabled through this commit. */
		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	/*
	 * Exactly one pipe already enabled: the newly enabled crtc must
	 * wait on it.  None enabled: the second new crtc (if any) waits
	 * on the first.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
12779
12780 static int intel_lock_all_pipes(struct drm_atomic_state *state)
12781 {
12782         struct drm_crtc *crtc;
12783
12784         /* Add all pipes to the state */
12785         for_each_crtc(state->dev, crtc) {
12786                 struct drm_crtc_state *crtc_state;
12787
12788                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12789                 if (IS_ERR(crtc_state))
12790                         return PTR_ERR(crtc_state);
12791         }
12792
12793         return 0;
12794 }
12795
12796 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
12797 {
12798         struct drm_crtc *crtc;
12799
12800         /*
12801          * Add all pipes to the state, and force
12802          * a modeset on all the active ones.
12803          */
12804         for_each_crtc(state->dev, crtc) {
12805                 struct drm_crtc_state *crtc_state;
12806                 int ret;
12807
12808                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12809                 if (IS_ERR(crtc_state))
12810                         return PTR_ERR(crtc_state);
12811
12812                 if (!crtc_state->active || needs_modeset(crtc_state))
12813                         continue;
12814
12815                 crtc_state->mode_changed = true;
12816
12817                 ret = drm_atomic_add_affected_connectors(state, crtc);
12818                 if (ret)
12819                         return ret;
12820
12821                 ret = drm_atomic_add_affected_planes(state, crtc);
12822                 if (ret)
12823                         return ret;
12824         }
12825
12826         return 0;
12827 }
12828
/*
 * intel_modeset_checks - global checks for a state requiring a modeset
 * @state: the atomic state being checked
 *
 * Validates digital port routing, recomputes the active-crtc bookkeeping,
 * recalculates cdclk (locking or modesetting all pipes when the cdclk
 * change demands it), clears stale shared-DPLL assignments, and applies
 * the Haswell multi-pipe plane workaround.  Returns 0 or a negative errno.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	/* Start from the current device-wide tracking and update it below. */
	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;
	intel_state->cdclk.logical = dev_priv->cdclk.logical;
	intel_state->cdclk.actual = dev_priv->cdclk.actual;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		if (old_crtc_state->active != new_crtc_state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off.  We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc.  For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/*
		 * Writes to dev_priv->cdclk.logical must protected by
		 * holding all the crtc locks, even if we don't end up
		 * touching the hardware
		 */
		if (intel_cdclk_changed(&dev_priv->cdclk.logical,
					&intel_state->cdclk.logical)) {
			ret = intel_lock_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		/* All pipes must be switched off while we change the cdclk. */
		if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
					      &intel_state->cdclk.actual)) {
			ret = intel_modeset_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
			      intel_state->cdclk.logical.cdclk,
			      intel_state->cdclk.actual.cdclk);
		DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
			      intel_state->cdclk.logical.voltage_level,
			      intel_state->cdclk.actual.voltage_level);
	} else {
		to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
	}

	intel_modeset_clear_plls(state);

	/* HSW needs planes on the second pipe delayed; see the w/a helper. */
	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
12906
12907 /*
12908  * Handle calculation of various watermark data at the end of the atomic check
12909  * phase.  The code here should be run after the per-crtc and per-plane 'check'
12910  * handlers to ensure that all derived state has been updated.
12911  */
12912 static int calc_watermark_data(struct intel_atomic_state *state)
12913 {
12914         struct drm_device *dev = state->base.dev;
12915         struct drm_i915_private *dev_priv = to_i915(dev);
12916
12917         /* Is there platform-specific watermark information to calculate? */
12918         if (dev_priv->display.compute_global_watermarks)
12919                 return dev_priv->display.compute_global_watermarks(state);
12920
12921         return 0;
12922 }
12923
/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * Returns 0 on success, or a negative error code.  -EDEADLK is
 * propagated unmodified so the drm core can back off and retry the
 * whole transaction.
 */
static int intel_atomic_check(struct drm_device *dev,
                              struct drm_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *crtc_state;
        int ret, i;
        bool any_ms = false;

        /* Catch I915_MODE_FLAG_INHERITED */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
                                      crtc_state, i) {
                /*
                 * Any change in the private mode flags forces a full
                 * modeset on that crtc.
                 */
                if (crtc_state->mode.private_flags !=
                    old_crtc_state->mode.private_flags)
                        crtc_state->mode_changed = true;
        }

        ret = drm_atomic_helper_check_modeset(dev, state);
        if (ret)
                return ret;

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
                struct intel_crtc_state *pipe_config =
                        to_intel_crtc_state(crtc_state);

                if (!needs_modeset(crtc_state))
                        continue;

                /* A crtc being disabled still counts as a modeset. */
                if (!crtc_state->enable) {
                        any_ms = true;
                        continue;
                }

                ret = intel_modeset_pipe_config(crtc, pipe_config);
                if (ret == -EDEADLK)
                        return ret;
                if (ret) {
                        intel_dump_pipe_config(to_intel_crtc(crtc),
                                               pipe_config, "[failed]");
                        return ret;
                }

                /*
                 * If the computed state is close enough to the current
                 * one, downgrade the full modeset to a fastset
                 * (update_pipe) so the pipe stays running.
                 */
                if (intel_pipe_config_compare(dev_priv,
                                        to_intel_crtc_state(old_crtc_state),
                                        pipe_config, true)) {
                        crtc_state->mode_changed = false;
                        pipe_config->update_pipe = true;
                }

                if (needs_modeset(crtc_state))
                        any_ms = true;

                intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
                                       needs_modeset(crtc_state) ?
                                       "[modeset]" : "[fastset]");
        }

        ret = drm_dp_mst_atomic_check(state);
        if (ret)
                return ret;

        if (any_ms) {
                ret = intel_modeset_checks(state);

                if (ret)
                        return ret;
        } else {
                /* No modeset anywhere: keep the current cdclk state. */
                intel_state->cdclk.logical = dev_priv->cdclk.logical;
        }

        ret = icl_add_linked_planes(intel_state);
        if (ret)
                return ret;

        ret = drm_atomic_helper_check_planes(dev, state);
        if (ret)
                return ret;

        intel_fbc_choose_crtc(dev_priv, intel_state);
        return calc_watermark_data(intel_state);
}
13011
/* Run the core helper's plane preparation for this commit. */
static int intel_atomic_prepare_commit(struct drm_device *dev,
                                       struct drm_atomic_state *state)
{
        int ret;

        ret = drm_atomic_helper_prepare_planes(dev, state);
        return ret;
}
13017
13018 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13019 {
13020         struct drm_device *dev = crtc->base.dev;
13021         struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
13022
13023         if (!vblank->max_vblank_count)
13024                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
13025
13026         return dev->driver->get_vblank_counter(dev, crtc->pipe);
13027 }
13028
/*
 * Enable (full modeset) or update (fastset) a single crtc, then commit
 * its plane state.  Both callers only invoke this for crtcs whose new
 * state is active.
 */
static void intel_update_crtc(struct drm_crtc *crtc,
                              struct drm_atomic_state *state,
                              struct drm_crtc_state *old_crtc_state,
                              struct drm_crtc_state *new_crtc_state)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
        bool modeset = needs_modeset(new_crtc_state);
        /* New primary plane state, used for the FBC decision below. */
        struct intel_plane_state *new_plane_state =
                intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
                                                 to_intel_plane(crtc->primary));

        if (modeset) {
                update_scanline_offset(pipe_config);
                dev_priv->display.crtc_enable(pipe_config, state);

                /* vblanks work again, re-enable pipe CRC. */
                intel_crtc_enable_pipe_crc(intel_crtc);
        } else {
                /* Fastset: apply pre-plane updates on the live pipe. */
                intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
                                       pipe_config);

                if (pipe_config->update_pipe)
                        intel_encoders_update_pipe(crtc, pipe_config, state);
        }

        /* Keep FBC in sync with the new pipe/plane configuration. */
        if (pipe_config->update_pipe && !pipe_config->enable_fbc)
                intel_fbc_disable(intel_crtc);
        else if (new_plane_state)
                intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);

        intel_begin_crtc_commit(crtc, old_crtc_state);

        /* Gen9+ uses the skl plane path, everything older the i9xx one. */
        if (INTEL_GEN(dev_priv) >= 9)
                skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
        else
                i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);

        intel_finish_crtc_commit(crtc, old_crtc_state);
}
13071
13072 static void intel_update_crtcs(struct drm_atomic_state *state)
13073 {
13074         struct drm_crtc *crtc;
13075         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13076         int i;
13077
13078         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13079                 if (!new_crtc_state->active)
13080                         continue;
13081
13082                 intel_update_crtc(crtc, state, old_crtc_state,
13083                                   new_crtc_state);
13084         }
13085 }
13086
/*
 * skl+ variant of intel_update_crtcs(): crtcs are updated in an order
 * that prevents their DDB allocations from ever overlapping mid-commit,
 * iterating until every active crtc has been updated.
 */
static void skl_update_crtcs(struct drm_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->dev);
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_crtc *crtc;
        struct intel_crtc *intel_crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct intel_crtc_state *cstate;
        unsigned int updated = 0;       /* mask of crtcs already updated */
        bool progress;
        enum pipe pipe;
        int i;
        u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
        u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
        struct skl_ddb_entry entries[I915_MAX_PIPES] = {};

        /* Seed with the DDB allocations currently programmed per pipe. */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
                /* ignore allocations for crtc's that have been turned off. */
                if (new_crtc_state->active)
                        entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;

        /* If 2nd DBuf slice required, enable it here */
        if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
                icl_dbuf_slices_update(dev_priv, required_slices);

        /*
         * Whenever the number of active pipes changes, we need to make sure we
         * update the pipes in the right order so that their ddb allocations
         * never overlap with eachother inbetween CRTC updates. Otherwise we'll
         * cause pipe underruns and other bad stuff.
         */
        do {
                progress = false;

                for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                        bool vbl_wait = false;
                        unsigned int cmask = drm_crtc_mask(crtc);

                        intel_crtc = to_intel_crtc(crtc);
                        cstate = to_intel_crtc_state(new_crtc_state);
                        pipe = intel_crtc->pipe;

                        /* Skip crtcs that are done or won't be active. */
                        if (updated & cmask || !cstate->base.active)
                                continue;

                        /*
                         * Defer this crtc while its new allocation would
                         * overlap one still programmed on another pipe;
                         * a later pass will pick it up.
                         */
                        if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
                                                        entries,
                                                        INTEL_INFO(dev_priv)->num_pipes, i))
                                continue;

                        updated |= cmask;
                        entries[i] = cstate->wm.skl.ddb;

                        /*
                         * If this is an already active pipe, it's DDB changed,
                         * and this isn't the last pipe that needs updating
                         * then we need to wait for a vblank to pass for the
                         * new ddb allocation to take effect.
                         */
                        if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
                                                 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
                            !new_crtc_state->active_changed &&
                            intel_state->wm_results.dirty_pipes != updated)
                                vbl_wait = true;

                        intel_update_crtc(crtc, state, old_crtc_state,
                                          new_crtc_state);

                        if (vbl_wait)
                                intel_wait_for_vblank(dev_priv, pipe);

                        progress = true;
                }
        } while (progress);

        /* If 2nd DBuf slice is no more required disable it */
        if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
                icl_dbuf_slices_update(dev_priv, required_slices);
}
13166
13167 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
13168 {
13169         struct intel_atomic_state *state, *next;
13170         struct llist_node *freed;
13171
13172         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
13173         llist_for_each_entry_safe(state, next, freed, freed)
13174                 drm_atomic_state_put(&state->base);
13175 }
13176
13177 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
13178 {
13179         struct drm_i915_private *dev_priv =
13180                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
13181
13182         intel_atomic_helper_free_state(dev_priv);
13183 }
13184
/*
 * Sleep until the commit's commit_ready fence has signalled, waking up
 * early if I915_RESET_MODESET is flagged in gpu_error so the commit
 * does not block a pending display-affecting GPU reset.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
        struct wait_queue_entry wait_fence, wait_reset;
        struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

        init_wait_entry(&wait_fence, 0);
        init_wait_entry(&wait_reset, 0);
        for (;;) {
                /*
                 * Queue ourselves on both wait queues before checking
                 * the conditions, so a wakeup on either can't be lost.
                 */
                prepare_to_wait(&intel_state->commit_ready.wait,
                                &wait_fence, TASK_UNINTERRUPTIBLE);
                prepare_to_wait(&dev_priv->gpu_error.wait_queue,
                                &wait_reset, TASK_UNINTERRUPTIBLE);


                if (i915_sw_fence_done(&intel_state->commit_ready)
                    || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
                        break;

                schedule();
        }
        finish_wait(&intel_state->commit_ready.wait, &wait_fence);
        finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
}
13208
13209 static void intel_atomic_cleanup_work(struct work_struct *work)
13210 {
13211         struct drm_atomic_state *state =
13212                 container_of(work, struct drm_atomic_state, commit_work);
13213         struct drm_i915_private *i915 = to_i915(state->dev);
13214
13215         drm_atomic_helper_cleanup_planes(&i915->drm, state);
13216         drm_atomic_helper_commit_cleanup_done(state);
13217         drm_atomic_state_put(state);
13218
13219         intel_atomic_helper_free_state(i915);
13220 }
13221
/*
 * Perform the hardware part of an atomic commit: disable outgoing
 * pipes, reprogram global state (cdclk, SAGV), enable/update the
 * remaining pipes, then run post-vblank fixups and queue cleanup.
 * Runs after check/swap, either inline or from a worker.
 */
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
        struct drm_crtc *crtc;
        struct intel_crtc *intel_crtc;
        u64 put_domains[I915_MAX_PIPES] = {};
        intel_wakeref_t wakeref = 0;
        int i;

        /* Wait for fences/reset, then for any commits we depend on. */
        intel_atomic_commit_fence_wait(intel_state);

        drm_atomic_helper_wait_for_dependencies(state);

        if (intel_state->modeset)
                wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

        /* Phase 1: grab power domains and disable outgoing pipes. */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
                new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
                intel_crtc = to_intel_crtc(crtc);

                if (needs_modeset(new_crtc_state) ||
                    to_intel_crtc_state(new_crtc_state)->update_pipe) {

                        /* Released again in the loop near the end. */
                        put_domains[intel_crtc->pipe] =
                                modeset_get_crtc_power_domains(crtc,
                                        new_intel_crtc_state);
                }

                if (!needs_modeset(new_crtc_state))
                        continue;

                intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);

                if (old_crtc_state->active) {
                        intel_crtc_disable_planes(intel_state, intel_crtc);

                        /*
                         * We need to disable pipe CRC before disabling the pipe,
                         * or we race against vblank off.
                         */
                        intel_crtc_disable_pipe_crc(intel_crtc);

                        dev_priv->display.crtc_disable(old_intel_crtc_state, state);
                        intel_crtc->active = false;
                        intel_fbc_disable(intel_crtc);
                        intel_disable_shared_dpll(old_intel_crtc_state);

                        /*
                         * Underruns don't always raise
                         * interrupts, so check manually.
                         */
                        intel_check_cpu_fifo_underruns(dev_priv);
                        intel_check_pch_fifo_underruns(dev_priv);

                        /* FIXME unify this for all platforms */
                        if (!new_crtc_state->active &&
                            !HAS_GMCH(dev_priv) &&
                            dev_priv->display.initial_watermarks)
                                dev_priv->display.initial_watermarks(intel_state,
                                                                     new_intel_crtc_state);
                }
        }

        /* FIXME: Eventually get rid of our intel_crtc->config pointer */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
                to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);

        /* Phase 2: global state that needs the pipes quiesced. */
        if (intel_state->modeset) {
                drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

                intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);

                /*
                 * SKL workaround: bspec recommends we disable the SAGV when we
                 * have more then one pipe enabled
                 */
                if (!intel_can_enable_sagv(state))
                        intel_disable_sagv(dev_priv);

                intel_modeset_verify_disabled(dev, state);
        }

        /* Complete the events for pipes that have now been disabled */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                bool modeset = needs_modeset(new_crtc_state);

                /* Complete events for now disable pipes here. */
                if (modeset && !new_crtc_state->active && new_crtc_state->event) {
                        spin_lock_irq(&dev->event_lock);
                        drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
                        spin_unlock_irq(&dev->event_lock);

                        new_crtc_state->event = NULL;
                }
        }

        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
        dev_priv->display.update_crtcs(state);

        /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
         * already, but still need the state for the delayed optimization. To
         * fix this:
         * - wrap the optimization/post_plane_update stuff into a per-crtc work.
         * - schedule that vblank worker _before_ calling hw_done
         * - at the start of commit_tail, cancel it _synchrously
         * - switch over to the vblank wait helper in the core after that since
         *   we don't need out special handling any more.
         */
        drm_atomic_helper_wait_for_flip_done(dev, state);

        /* Phase 3: post-vblank fixups on pipes that stayed running. */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);

                if (new_crtc_state->active &&
                    !needs_modeset(new_crtc_state) &&
                    (new_intel_crtc_state->base.color_mgmt_changed ||
                     new_intel_crtc_state->update_pipe))
                        intel_color_load_luts(new_intel_crtc_state);
        }

        /*
         * Now that the vblank has passed, we can go ahead and program the
         * optimal watermarks on platforms that need two-step watermark
         * programming.
         *
         * TODO: Move this (and other cleanup) to an async worker eventually.
         */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);

                if (dev_priv->display.optimize_watermarks)
                        dev_priv->display.optimize_watermarks(intel_state,
                                                              new_intel_crtc_state);
        }

        /* Release the power domains grabbed in phase 1 and verify. */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                intel_post_plane_update(to_intel_crtc_state(old_crtc_state));

                if (put_domains[i])
                        modeset_put_power_domains(dev_priv, put_domains[i]);

                intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
        }

        if (intel_state->modeset)
                intel_verify_planes(intel_state);

        if (intel_state->modeset && intel_can_enable_sagv(state))
                intel_enable_sagv(dev_priv);

        drm_atomic_helper_commit_hw_done(state);

        if (intel_state->modeset) {
                /* As one of the primary mmio accessors, KMS has a high
                 * likelihood of triggering bugs in unclaimed access. After we
                 * finish modesetting, see if an error has been flagged, and if
                 * so enable debugging for the next modeset - and hope we catch
                 * the culprit.
                 */
                intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
                intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
        }

        /*
         * Defer the cleanup of the old state to a separate worker to not
         * impede the current task (userspace for blocking modesets) that
         * are executed inline. For out-of-line asynchronous modesets/flips,
         * deferring to a new worker seems overkill, but we would place a
         * schedule point (cond_resched()) here anyway to keep latencies
         * down.
         */
        INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
        queue_work(system_highpri_wq, &state->commit_work);
}
13401
13402 static void intel_atomic_commit_work(struct work_struct *work)
13403 {
13404         struct drm_atomic_state *state =
13405                 container_of(work, struct drm_atomic_state, commit_work);
13406
13407         intel_atomic_commit_tail(state);
13408 }
13409
13410 static int __i915_sw_fence_call
13411 intel_atomic_commit_ready(struct i915_sw_fence *fence,
13412                           enum i915_sw_fence_notify notify)
13413 {
13414         struct intel_atomic_state *state =
13415                 container_of(fence, struct intel_atomic_state, commit_ready);
13416
13417         switch (notify) {
13418         case FENCE_COMPLETE:
13419                 /* we do blocking waits in the worker, nothing to do here */
13420                 break;
13421         case FENCE_FREE:
13422                 {
13423                         struct intel_atomic_helper *helper =
13424                                 &to_i915(state->base.dev)->atomic_helper;
13425
13426                         if (llist_add(&state->freed, &helper->free_list))
13427                                 schedule_work(&helper->free_work);
13428                         break;
13429                 }
13430         }
13431
13432         return NOTIFY_DONE;
13433 }
13434
13435 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13436 {
13437         struct drm_plane_state *old_plane_state, *new_plane_state;
13438         struct drm_plane *plane;
13439         int i;
13440
13441         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
13442                 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
13443                                   intel_fb_obj(new_plane_state->fb),
13444                                   to_intel_plane(plane)->frontbuffer_bit);
13445 }
13446
/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
                               struct drm_atomic_state *state,
                               bool nonblock)
{
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;

        /* Reference held by the commit_ready fence until FENCE_FREE. */
        drm_atomic_state_get(state);
        i915_sw_fence_init(&intel_state->commit_ready,
                           intel_atomic_commit_ready);

        /*
         * The intel_legacy_cursor_update() fast path takes care
         * of avoiding the vblank waits for simple cursor
         * movement and flips. For cursor on/off and size changes,
         * we want to perform the vblank waits so that watermark
         * updates happen during the correct frames. Gen9+ have
         * double buffered watermarks and so shouldn't need this.
         *
         * Unset state->legacy_cursor_update before the call to
         * drm_atomic_helper_setup_commit() because otherwise
         * drm_atomic_helper_wait_for_flip_done() is a noop and
         * we get FIFO underruns because we didn't wait
         * for vblank.
         *
         * FIXME doing watermarks and fb cleanup from a vblank worker
         * (assuming we had any) would solve these problems.
         */
        if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
                struct intel_crtc_state *new_crtc_state;
                struct intel_crtc *crtc;
                int i;

                for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
                        if (new_crtc_state->wm.need_postvbl_update ||
                            new_crtc_state->update_wm_post)
                                state->legacy_cursor_update = false;
        }

        ret = intel_atomic_prepare_commit(dev, state);
        if (ret) {
                DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
                i915_sw_fence_commit(&intel_state->commit_ready);
                return ret;
        }

        ret = drm_atomic_helper_setup_commit(state, nonblock);
        if (!ret)
                ret = drm_atomic_helper_swap_state(state, true);

        if (ret) {
                i915_sw_fence_commit(&intel_state->commit_ready);

                drm_atomic_helper_cleanup_planes(dev, state);
                return ret;
        }
        /* State swapped: publish the new software-side bookkeeping. */
        dev_priv->wm.distrust_bios_wm = false;
        intel_shared_dpll_swap_state(state);
        intel_atomic_track_fbs(state);

        if (intel_state->modeset) {
                memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
                       sizeof(intel_state->min_cdclk));
                memcpy(dev_priv->min_voltage_level,
                       intel_state->min_voltage_level,
                       sizeof(intel_state->min_voltage_level));
                dev_priv->active_crtcs = intel_state->active_crtcs;
                dev_priv->cdclk.logical = intel_state->cdclk.logical;
                dev_priv->cdclk.actual = intel_state->cdclk.actual;
        }

        /* Reference dropped by intel_atomic_cleanup_work(). */
        drm_atomic_state_get(state);
        INIT_WORK(&state->commit_work, intel_atomic_commit_work);

        i915_sw_fence_commit(&intel_state->commit_ready);
        if (nonblock && intel_state->modeset) {
                queue_work(dev_priv->modeset_wq, &state->commit_work);
        } else if (nonblock) {
                queue_work(system_unbound_wq, &state->commit_work);
        } else {
                /* Blocking commit: run the tail inline. */
                if (intel_state->modeset)
                        flush_workqueue(dev_priv->modeset_wq);
                intel_atomic_commit_tail(state);
        }

        return 0;
}
13547
/*
 * CRTC vfuncs: legacy entry points (gamma, set_config, page_flip) are
 * serviced by the atomic helpers; state and CRC hooks are i915's own.
 */
static const struct drm_crtc_funcs intel_crtc_funcs = {
        .gamma_set = drm_atomic_helper_legacy_gamma_set,
        .set_config = drm_atomic_helper_set_config,
        .destroy = intel_crtc_destroy,
        .page_flip = drm_atomic_helper_page_flip,
        .atomic_duplicate_state = intel_crtc_duplicate_state,
        .atomic_destroy_state = intel_crtc_destroy_state,
        .set_crc_source = intel_crtc_set_crc_source,
        .verify_crc_source = intel_crtc_verify_crc_source,
        .get_crc_sources = intel_crtc_get_crc_sources,
};
13559
/*
 * One-shot vblank-waitqueue entry that boosts RPS in do_rps_boost()
 * if the tracked request hasn't started by the next vblank.
 */
struct wait_rps_boost {
        struct wait_queue_entry wait;

        struct drm_crtc *crtc;          /* crtc we hold a vblank reference on */
        struct i915_request *request;   /* request to boost; we hold a reference */
};
13566
/*
 * Vblank waitqueue callback: if the flip request has not yet started
 * executing when the vblank fires, boost the GPU frequency so it can
 * still make the next frame; then drop the references taken in
 * add_rps_boost_after_vblank() and free the wait entry.
 */
static int do_rps_boost(struct wait_queue_entry *_wait,
			unsigned mode, int sync, void *key)
{
	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
	struct i915_request *rq = wait->request;

	/*
	 * If we missed the vblank, but the request is already running it
	 * is reasonable to assume that it will complete before the next
	 * vblank without our intervention, so leave RPS alone.
	 */
	if (!i915_request_started(rq))
		gen6_rps_boost(rq, NULL);
	i915_request_put(rq);

	drm_crtc_vblank_put(wait->crtc);

	/* Self-removing: this boost fires at most once. */
	list_del(&wait->wait.entry);
	kfree(wait);
	return 1;
}
13588
/*
 * Arm a one-shot RPS boost that fires on @crtc's next vblank if the
 * flip @fence (expected to be an i915 request) has not started by then.
 * A vblank reference and a fence reference are taken here and released
 * in do_rps_boost(). Boosting is best effort: foreign fences, gen < 6,
 * vblank enable failure and allocation failure all bail out silently.
 */
static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
				       struct dma_fence *fence)
{
	struct wait_rps_boost *wait;

	if (!dma_fence_is_i915(fence))
		return;

	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
		return;

	if (drm_crtc_vblank_get(crtc))
		return;

	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait) {
		drm_crtc_vblank_put(crtc);
		return;
	}

	wait->request = to_request(dma_fence_get(fence));
	wait->crtc = crtc;

	wait->wait.func = do_rps_boost;
	wait->wait.flags = 0;

	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}
13617
13618 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
13619 {
13620         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
13621         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
13622         struct drm_framebuffer *fb = plane_state->base.fb;
13623         struct i915_vma *vma;
13624
13625         if (plane->id == PLANE_CURSOR &&
13626             INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
13627                 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13628                 const int align = intel_cursor_alignment(dev_priv);
13629                 int err;
13630
13631                 err = i915_gem_object_attach_phys(obj, align);
13632                 if (err)
13633                         return err;
13634         }
13635
13636         vma = intel_pin_and_fence_fb_obj(fb,
13637                                          &plane_state->view,
13638                                          intel_plane_uses_fence(plane_state),
13639                                          &plane_state->flags);
13640         if (IS_ERR(vma))
13641                 return PTR_ERR(vma);
13642
13643         plane_state->vma = vma;
13644
13645         return 0;
13646 }
13647
13648 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
13649 {
13650         struct i915_vma *vma;
13651
13652         vma = fetch_and_zero(&old_plane_state->vma);
13653         if (vma)
13654                 intel_unpin_fb_vma(vma, old_plane_state->flags);
13655 }
13656
/*
 * Bump the scheduling priority of rendering targeting @obj to the
 * display priority, so work feeding a scanout buffer is not starved.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_PRIORITY_DISPLAY,
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
13665
13666 /**
13667  * intel_prepare_plane_fb - Prepare fb for usage on plane
13668  * @plane: drm plane to prepare for
13669  * @new_state: the plane state being prepared
13670  *
13671  * Prepares a framebuffer for usage on a display plane.  Generally this
13672  * involves pinning the underlying object and updating the frontbuffer tracking
13673  * bits.  Some older platforms need special physical address handling for
13674  * cursor planes.
13675  *
13676  * Must be called with struct_mutex held.
13677  *
13678  * Returns 0 on success, negative error code on failure.
13679  */
13680 int
13681 intel_prepare_plane_fb(struct drm_plane *plane,
13682                        struct drm_plane_state *new_state)
13683 {
13684         struct intel_atomic_state *intel_state =
13685                 to_intel_atomic_state(new_state->state);
13686         struct drm_i915_private *dev_priv = to_i915(plane->dev);
13687         struct drm_framebuffer *fb = new_state->fb;
13688         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13689         struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13690         int ret;
13691
13692         if (old_obj) {
13693                 struct drm_crtc_state *crtc_state =
13694                         drm_atomic_get_new_crtc_state(new_state->state,
13695                                                       plane->state->crtc);
13696
13697                 /* Big Hammer, we also need to ensure that any pending
13698                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13699                  * current scanout is retired before unpinning the old
13700                  * framebuffer. Note that we rely on userspace rendering
13701                  * into the buffer attached to the pipe they are waiting
13702                  * on. If not, userspace generates a GPU hang with IPEHR
13703                  * point to the MI_WAIT_FOR_EVENT.
13704                  *
13705                  * This should only fail upon a hung GPU, in which case we
13706                  * can safely continue.
13707                  */
13708                 if (needs_modeset(crtc_state)) {
13709                         ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13710                                                               old_obj->resv, NULL,
13711                                                               false, 0,
13712                                                               GFP_KERNEL);
13713                         if (ret < 0)
13714                                 return ret;
13715                 }
13716         }
13717
13718         if (new_state->fence) { /* explicit fencing */
13719                 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
13720                                                     new_state->fence,
13721                                                     I915_FENCE_TIMEOUT,
13722                                                     GFP_KERNEL);
13723                 if (ret < 0)
13724                         return ret;
13725         }
13726
13727         if (!obj)
13728                 return 0;
13729
13730         ret = i915_gem_object_pin_pages(obj);
13731         if (ret)
13732                 return ret;
13733
13734         ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
13735         if (ret) {
13736                 i915_gem_object_unpin_pages(obj);
13737                 return ret;
13738         }
13739
13740         ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
13741
13742         mutex_unlock(&dev_priv->drm.struct_mutex);
13743         i915_gem_object_unpin_pages(obj);
13744         if (ret)
13745                 return ret;
13746
13747         fb_obj_bump_render_priority(obj);
13748         intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
13749
13750         if (!new_state->fence) { /* implicit fencing */
13751                 struct dma_fence *fence;
13752
13753                 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13754                                                       obj->resv, NULL,
13755                                                       false, I915_FENCE_TIMEOUT,
13756                                                       GFP_KERNEL);
13757                 if (ret < 0)
13758                         return ret;
13759
13760                 fence = reservation_object_get_excl_rcu(obj->resv);
13761                 if (fence) {
13762                         add_rps_boost_after_vblank(new_state->crtc, fence);
13763                         dma_fence_put(fence);
13764                 }
13765         } else {
13766                 add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
13767         }
13768
13769         /*
13770          * We declare pageflips to be interactive and so merit a small bias
13771          * towards upclocking to deliver the frame on time. By only changing
13772          * the RPS thresholds to sample more regularly and aim for higher
13773          * clocks we can hopefully deliver low power workloads (like kodi)
13774          * that are not quite steady state without resorting to forcing
13775          * maximum clocks following a vblank miss (see do_rps_boost()).
13776          */
13777         if (!intel_state->rps_interactive) {
13778                 intel_rps_mark_interactive(dev_priv, true);
13779                 intel_state->rps_interactive = true;
13780         }
13781
13782         return 0;
13783 }
13784
13785 /**
13786  * intel_cleanup_plane_fb - Cleans up an fb after plane use
13787  * @plane: drm plane to clean up for
13788  * @old_state: the state from the previous modeset
13789  *
13790  * Cleans up a framebuffer that has just been removed from a plane.
13791  *
13792  * Must be called with struct_mutex held.
13793  */
13794 void
13795 intel_cleanup_plane_fb(struct drm_plane *plane,
13796                        struct drm_plane_state *old_state)
13797 {
13798         struct intel_atomic_state *intel_state =
13799                 to_intel_atomic_state(old_state->state);
13800         struct drm_i915_private *dev_priv = to_i915(plane->dev);
13801
13802         if (intel_state->rps_interactive) {
13803                 intel_rps_mark_interactive(dev_priv, false);
13804                 intel_state->rps_interactive = false;
13805         }
13806
13807         /* Should only be called after a successful intel_prepare_plane_fb()! */
13808         mutex_lock(&dev_priv->drm.struct_mutex);
13809         intel_plane_unpin_fb(to_intel_plane_state(old_state));
13810         mutex_unlock(&dev_priv->drm.struct_mutex);
13811 }
13812
13813 int
13814 skl_max_scale(const struct intel_crtc_state *crtc_state,
13815               u32 pixel_format)
13816 {
13817         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
13818         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13819         int max_scale, mult;
13820         int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
13821
13822         if (!crtc_state->base.enable)
13823                 return DRM_PLANE_HELPER_NO_SCALING;
13824
13825         crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13826         max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
13827
13828         if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
13829                 max_dotclk *= 2;
13830
13831         if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
13832                 return DRM_PLANE_HELPER_NO_SCALING;
13833
13834         /*
13835          * skl max scale is lower of:
13836          *    close to 3 but not 3, -1 is for that purpose
13837          *            or
13838          *    cdclk/crtc_clock
13839          */
13840         mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
13841         tmpclk1 = (1 << 16) * mult - 1;
13842         tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
13843         max_scale = min(tmpclk1, tmpclk2);
13844
13845         return max_scale;
13846 }
13847
/*
 * Per-crtc hook run at the start of a commit: opens the vblank-evasion
 * critical section and performs the non-modeset fastpath updates
 * (color management, pipe config / scaler detach, watermarks).
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_cstate =
		to_intel_crtc_state(old_crtc_state);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	struct intel_crtc_state *intel_cstate =
		intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
	bool modeset = needs_modeset(&intel_cstate->base);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_cstate);

	/* Full modesets program the pipe elsewhere; skip fastpath updates. */
	if (modeset)
		goto out;

	if (intel_cstate->base.color_mgmt_changed ||
	    intel_cstate->update_pipe)
		intel_color_commit(intel_cstate);

	if (intel_cstate->update_pipe)
		intel_update_pipe_config(old_intel_cstate, intel_cstate);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(intel_cstate);

out:
	/* Watermarks are updated even across a modeset. */
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(old_intel_state,
							   intel_cstate);
}
13882
13883 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
13884                                   struct intel_crtc_state *crtc_state)
13885 {
13886         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13887
13888         if (!IS_GEN(dev_priv, 2))
13889                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
13890
13891         if (crtc_state->has_pch_encoder) {
13892                 enum pipe pch_transcoder =
13893                         intel_crtc_pch_transcoder(crtc);
13894
13895                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
13896         }
13897 }
13898
13899 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13900                                      struct drm_crtc_state *old_crtc_state)
13901 {
13902         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13903         struct intel_atomic_state *old_intel_state =
13904                 to_intel_atomic_state(old_crtc_state->state);
13905         struct intel_crtc_state *new_crtc_state =
13906                 intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
13907
13908         intel_pipe_update_end(new_crtc_state);
13909
13910         if (new_crtc_state->update_pipe &&
13911             !needs_modeset(&new_crtc_state->base) &&
13912             old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
13913                 intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
13914 }
13915
13916 /**
13917  * intel_plane_destroy - destroy a plane
13918  * @plane: plane to destroy
13919  *
13920  * Common destruction function for all types of planes (primary, cursor,
13921  * sprite).
13922  */
13923 void intel_plane_destroy(struct drm_plane *plane)
13924 {
13925         drm_plane_cleanup(plane);
13926         kfree(to_intel_plane(plane));
13927 }
13928
13929 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
13930                                             u32 format, u64 modifier)
13931 {
13932         switch (modifier) {
13933         case DRM_FORMAT_MOD_LINEAR:
13934         case I915_FORMAT_MOD_X_TILED:
13935                 break;
13936         default:
13937                 return false;
13938         }
13939
13940         switch (format) {
13941         case DRM_FORMAT_C8:
13942         case DRM_FORMAT_RGB565:
13943         case DRM_FORMAT_XRGB1555:
13944         case DRM_FORMAT_XRGB8888:
13945                 return modifier == DRM_FORMAT_MOD_LINEAR ||
13946                         modifier == I915_FORMAT_MOD_X_TILED;
13947         default:
13948                 return false;
13949         }
13950 }
13951
13952 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
13953                                             u32 format, u64 modifier)
13954 {
13955         switch (modifier) {
13956         case DRM_FORMAT_MOD_LINEAR:
13957         case I915_FORMAT_MOD_X_TILED:
13958                 break;
13959         default:
13960                 return false;
13961         }
13962
13963         switch (format) {
13964         case DRM_FORMAT_C8:
13965         case DRM_FORMAT_RGB565:
13966         case DRM_FORMAT_XRGB8888:
13967         case DRM_FORMAT_XBGR8888:
13968         case DRM_FORMAT_XRGB2101010:
13969         case DRM_FORMAT_XBGR2101010:
13970                 return modifier == DRM_FORMAT_MOD_LINEAR ||
13971                         modifier == I915_FORMAT_MOD_X_TILED;
13972         default:
13973                 return false;
13974         }
13975 }
13976
13977 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
13978                                               u32 format, u64 modifier)
13979 {
13980         return modifier == DRM_FORMAT_MOD_LINEAR &&
13981                 format == DRM_FORMAT_ARGB8888;
13982 }
13983
/* Plane vfuncs for the gen4+ primary plane. */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
13994
/* Plane vfuncs for the gen2/3 primary plane. */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
14005
/*
 * Fast path for legacy cursor updates: program the cursor plane
 * directly, without a full atomic commit, when only the fb or position
 * changed. Anything that needs a modeset, could affect watermarks, or
 * races with an outstanding commit falls back to
 * drm_atomic_helper_update_plane() via the "slow" label.
 */
static int
intel_legacy_cursor_update(struct drm_plane *plane,
			   struct drm_crtc *crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int ret;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_framebuffer *old_fb;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->state);
	struct intel_crtc_state *new_crtc_state;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
	    crtc_state->update_pipe)
		goto slow;

	old_plane_state = plane->state;
	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->commit &&
	    !try_wait_for_completion(&old_plane_state->commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->crtc != crtc ||
	    old_plane_state->src_w != src_w ||
	    old_plane_state->src_h != src_h ||
	    old_plane_state->crtc_w != crtc_w ||
	    old_plane_state->crtc_h != crtc_h ||
	    !old_plane_state->fb != !fb)
		goto slow;

	new_plane_state = intel_plane_duplicate_state(plane);
	if (!new_plane_state)
		return -ENOMEM;

	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(new_plane_state, fb);

	new_plane_state->src_x = src_x;
	new_plane_state->src_y = src_y;
	new_plane_state->src_w = src_w;
	new_plane_state->src_h = src_h;
	new_plane_state->crtc_x = crtc_x;
	new_plane_state->crtc_y = crtc_y;
	new_plane_state->crtc_w = crtc_w;
	new_plane_state->crtc_h = crtc_h;

	/* Run the normal plane checks against the duplicated states. */
	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  to_intel_plane_state(old_plane_state),
						  to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_free;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_unlock;

	intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);

	/* Move frontbuffer tracking from the old fb to the new one. */
	old_fb = old_plane_state->fb;
	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
			  intel_plane->frontbuffer_bit);

	/* Swap plane state */
	plane->state = new_plane_state;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (plane->state->visible) {
		trace_intel_update_plane(plane, to_intel_crtc(crtc));
		intel_plane->update_plane(intel_plane, crtc_state,
					  to_intel_plane_state(plane->state));
	} else {
		trace_intel_disable_plane(plane, to_intel_crtc(crtc));
		intel_plane->disable_plane(intel_plane, crtc_state);
	}

	intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
	/* The duplicated crtc state is only used for the check above. */
	if (new_crtc_state)
		intel_crtc_destroy_state(crtc, &new_crtc_state->base);
	/* On failure the new state was never installed; on success the old one is stale. */
	if (ret)
		intel_plane_destroy_state(plane, new_plane_state);
	else
		intel_plane_destroy_state(plane, old_plane_state);
	return ret;

slow:
	return drm_atomic_helper_update_plane(plane, crtc, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
14139
/* Cursor plane vfuncs; update_plane uses the legacy fastpath above. */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
14150
14151 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
14152                                enum i9xx_plane_id i9xx_plane)
14153 {
14154         if (!HAS_FBC(dev_priv))
14155                 return false;
14156
14157         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
14158                 return i9xx_plane == PLANE_A; /* tied to pipe A */
14159         else if (IS_IVYBRIDGE(dev_priv))
14160                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
14161                         i9xx_plane == PLANE_C;
14162         else if (INTEL_GEN(dev_priv) >= 4)
14163                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
14164         else
14165                 return i9xx_plane == PLANE_A;
14166 }
14167
/*
 * Create and register the primary plane for @pipe. Gen9+ goes through
 * the universal plane path; older platforms select gen-appropriate
 * vfuncs, format lists and rotation support here. Returns the new
 * plane or an ERR_PTR.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *plane;
	const struct drm_plane_funcs *plane_funcs;
	unsigned int supported_rotations;
	unsigned int possible_crtcs;
	const u64 *modifiers;
	const u32 *formats;
	int num_formats;
	int ret;

	if (INTEL_GEN(dev_priv) >= 9)
		return skl_universal_plane_create(dev_priv, pipe,
						  PLANE_PRIMARY);

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return plane;

	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
	plane->id = PLANE_PRIMARY;
	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

	/* Register the plane's frontbuffer bit with FBC, if eligible. */
	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
	if (plane->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
	}

	if (INTEL_GEN(dev_priv) >= 4) {
		formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);
		modifiers = i9xx_format_modifiers;

		plane->max_stride = i9xx_plane_max_stride;
		plane->update_plane = i9xx_update_plane;
		plane->disable_plane = i9xx_disable_plane;
		plane->get_hw_state = i9xx_plane_get_hw_state;
		plane->check_plane = i9xx_plane_check;

		plane_funcs = &i965_plane_funcs;
	} else {
		formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
		modifiers = i9xx_format_modifiers;

		plane->max_stride = i9xx_plane_max_stride;
		plane->update_plane = i9xx_update_plane;
		plane->disable_plane = i9xx_disable_plane;
		plane->get_hw_state = i9xx_plane_get_hw_state;
		plane->check_plane = i9xx_plane_check;

		plane_funcs = &i8xx_plane_funcs;
	}

	possible_crtcs = BIT(pipe);

	/* Plane naming differs between ilk+/g4x and older platforms. */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats, modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats, modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(plane->i9xx_plane));
	if (ret)
		goto fail;

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&plane->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

	return plane;

fail:
	intel_plane_free(plane);

	return ERR_PTR(ret);
}
14276
/*
 * Create and register the cursor plane for @pipe, selecting the
 * i845/i865 or i9xx cursor vfuncs as appropriate. Returns the new
 * plane or an ERR_PTR.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	unsigned int possible_crtcs;
	struct intel_plane *cursor;
	int ret;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/* ~0 sentinels so the cached values never match the first update. */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	possible_crtcs = BIT(pipe);

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       possible_crtcs, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}
14341
14342 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
14343                                     struct intel_crtc_state *crtc_state)
14344 {
14345         struct intel_crtc_scaler_state *scaler_state =
14346                 &crtc_state->scaler_state;
14347         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14348         int i;
14349
14350         crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
14351         if (!crtc->num_scalers)
14352                 return;
14353
14354         for (i = 0; i < crtc->num_scalers; i++) {
14355                 struct intel_scaler *scaler = &scaler_state->scalers[i];
14356
14357                 scaler->in_use = 0;
14358                 scaler->mode = 0;
14359         }
14360
14361         scaler_state->scaler_id = -1;
14362 }
14363
/*
 * Allocate and register a CRTC for @pipe, along with its primary plane,
 * any sprite planes, and the cursor plane.
 *
 * On success the crtc is recorded in the pipe (and, pre-gen9, plane)
 * to crtc mapping tables.  On failure, planes already registered with
 * the DRM core are torn down later by drm_mode_config_cleanup(); only
 * the local allocations are freed here.
 *
 * Returns 0 on success or a negative errno.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct intel_plane *primary = NULL;
	struct intel_plane *cursor = NULL;
	int sprite, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (!intel_crtc)
		return -ENOMEM;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		ret = -ENOMEM;
		goto fail;
	}
	/* Cross-link the software state and the base drm_crtc. */
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		intel_crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(cursor->id);

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
					&primary->base, &cursor->base,
					&intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	intel_crtc->pipe = pipe;

	/* initialize shared scalers */
	intel_crtc_init_scalers(intel_crtc, crtc_state);

	/* Each pipe must be claimed exactly once. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(intel_crtc);

	/* Code elsewhere relies on crtc index == pipe. */
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

	return 0;

fail:
	/*
	 * drm_mode_config_cleanup() will free up any
	 * crtcs/planes already initialized.
	 */
	kfree(crtc_state);
	kfree(intel_crtc);

	return ret;
}
14452
14453 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14454                                       struct drm_file *file)
14455 {
14456         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14457         struct drm_crtc *drmmode_crtc;
14458         struct intel_crtc *crtc;
14459
14460         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
14461         if (!drmmode_crtc)
14462                 return -ENOENT;
14463
14464         crtc = to_intel_crtc(drmmode_crtc);
14465         pipe_from_crtc_id->pipe = crtc->pipe;
14466
14467         return 0;
14468 }
14469
14470 static int intel_encoder_clones(struct intel_encoder *encoder)
14471 {
14472         struct drm_device *dev = encoder->base.dev;
14473         struct intel_encoder *source_encoder;
14474         int index_mask = 0;
14475         int entry = 0;
14476
14477         for_each_intel_encoder(dev, source_encoder) {
14478                 if (encoders_cloneable(encoder, source_encoder))
14479                         index_mask |= (1 << entry);
14480
14481                 entry++;
14482         }
14483
14484         return index_mask;
14485 }
14486
14487 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
14488 {
14489         if (!IS_MOBILE(dev_priv))
14490                 return false;
14491
14492         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14493                 return false;
14494
14495         if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14496                 return false;
14497
14498         return true;
14499 }
14500
14501 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
14502 {
14503         if (INTEL_GEN(dev_priv) >= 9)
14504                 return false;
14505
14506         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
14507                 return false;
14508
14509         if (HAS_PCH_LPT_H(dev_priv) &&
14510             I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14511                 return false;
14512
14513         /* DDI E can't be used if DDI A requires 4 lanes */
14514         if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14515                 return false;
14516
14517         if (!dev_priv->vbt.int_crt_support)
14518                 return false;
14519
14520         return true;
14521 }
14522
14523 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14524 {
14525         int pps_num;
14526         int pps_idx;
14527
14528         if (HAS_DDI(dev_priv))
14529                 return;
14530         /*
14531          * This w/a is needed at least on CPT/PPT, but to be sure apply it
14532          * everywhere where registers can be write protected.
14533          */
14534         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14535                 pps_num = 2;
14536         else
14537                 pps_num = 1;
14538
14539         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
14540                 u32 val = I915_READ(PP_CONTROL(pps_idx));
14541
14542                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
14543                 I915_WRITE(PP_CONTROL(pps_idx), val);
14544         }
14545 }
14546
/*
 * Record the MMIO base of the panel power sequencer register block for
 * this platform, then apply the PPS register unlock workaround.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}
14558
/*
 * Probe and register every display output (DDI, DP, HDMI, SDVO, LVDS,
 * CRT, TV, DSI) supported by this platform.
 *
 * Each platform family has its own strap-register and/or VBT based
 * detection scheme, hence the large if-ladder.  PPS state is set up
 * first so eDP connector init finds a valid sequencer.  Afterwards the
 * possible_crtcs/possible_clones masks are filled in for all encoders.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_ICELAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		/* Skip HDMI on port D when the panel there is eDP. */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_GEN(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	intel_psr_init(dev_priv);

	/* Now that all encoders exist, fill in their compatibility masks. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
14780
/*
 * drm_framebuffer .destroy hook: unregister the fb, drop its scanout
 * bookkeeping on the backing GEM object, release the object reference,
 * and free the wrapper.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	drm_framebuffer_cleanup(fb);

	/* Decrement under the object lock; going negative is a bug. */
	i915_gem_object_lock(obj);
	WARN_ON(!obj->framebuffer_references--);
	i915_gem_object_unlock(obj);

	i915_gem_object_put(obj);

	kfree(intel_fb);
}
14796
14797 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14798                                                 struct drm_file *file,
14799                                                 unsigned int *handle)
14800 {
14801         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14802
14803         if (obj->userptr.mm) {
14804                 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14805                 return -EINVAL;
14806         }
14807
14808         return drm_gem_handle_create(file, &obj->base, handle);
14809 }
14810
/*
 * drm_framebuffer .dirty hook (DIRTYFB ioctl): flush pending CPU writes
 * on the backing object so they become visible on the display.  The
 * clip rectangles are ignored; the whole object is flushed.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	return 0;
}
14824
/* Framebuffer vfuncs for fbs created on top of GEM objects. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14830
14831 static
14832 u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
14833                          u32 pixel_format, u64 fb_modifier)
14834 {
14835         struct intel_crtc *crtc;
14836         struct intel_plane *plane;
14837
14838         /*
14839          * We assume the primary plane for pipe A has
14840          * the highest stride limits of them all.
14841          */
14842         crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
14843         plane = to_intel_plane(crtc->base.primary);
14844
14845         return plane->max_stride(plane, pixel_format, fb_modifier,
14846                                  DRM_MODE_ROTATE_0);
14847 }
14848
/*
 * Validate @mode_cmd against hardware and tiling constraints and
 * initialize @intel_fb around @obj.
 *
 * Takes a framebuffer reference on the object for the fb's lifetime;
 * the reference is dropped again on every error path.  Returns 0 on
 * success or a negative errno (-EINVAL for constraint violations).
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 pitch_limit;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	/* Snapshot the object's tiling state under its lock. */
	i915_gem_object_lock(obj);
	obj->framebuffer_references++;
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* No modifiers from userspace: derive one from the tiling. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
			      drm_get_format_name(mode_cmd->pixel_format,
						  &format_name),
			      mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
					   mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
			      mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			      "tiled" : "linear",
			      mode_cmd->pitches[0], pitch_limit);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
			      mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		goto err;

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		/* All color planes must live in the same GEM object. */
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			DRM_DEBUG_KMS("bad plane %d handle\n", i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);

		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
		    is_ccs_modifier(fb->modifier))
			stride_alignment *= 4;

		if (fb->pitches[i] & (stride_alignment - 1)) {
			DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
				      i, fb->pitches[i], stride_alignment);
			goto err;
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	/* Undo the framebuffer reference taken at the top. */
	i915_gem_object_lock(obj);
	obj->framebuffer_references--;
	i915_gem_object_unlock(obj);
	return ret;
}
14983
14984 static struct drm_framebuffer *
14985 intel_user_framebuffer_create(struct drm_device *dev,
14986                               struct drm_file *filp,
14987                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
14988 {
14989         struct drm_framebuffer *fb;
14990         struct drm_i915_gem_object *obj;
14991         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14992
14993         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
14994         if (!obj)
14995                 return ERR_PTR(-ENOENT);
14996
14997         fb = intel_framebuffer_create(obj, &mode_cmd);
14998         if (IS_ERR(fb))
14999                 i915_gem_object_put(obj);
15000
15001         return fb;
15002 }
15003
/*
 * mode_config .atomic_state_free hook: release the base state, tear
 * down the embedded commit-ready fence, and free the allocation.
 */
static void intel_atomic_state_free(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);

	drm_atomic_state_default_release(state);

	i915_sw_fence_fini(&intel_state->commit_ready);

	kfree(state);
}
15014
/*
 * Global mode validation: reject mode flags and timings that no pipe on
 * this platform can ever generate, independent of any particular
 * connector or encoder.
 */
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/* Per-generation maximum active/total timing values. */
	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}
15084
/*
 * Device-wide mode config vfuncs: framebuffer creation/format lookup,
 * global mode validation, fbdev poll notification, and the atomic
 * check/commit and state lifecycle entry points.
 */
static const struct drm_mode_config_funcs intel_mode_funcs = {
        .fb_create = intel_user_framebuffer_create,
        .get_format_info = intel_get_format_info,
        .output_poll_changed = intel_fbdev_output_poll_changed,
        .mode_valid = intel_mode_valid,
        .atomic_check = intel_atomic_check,
        .atomic_commit = intel_atomic_commit,
        .atomic_state_alloc = intel_atomic_state_alloc,
        .atomic_state_clear = intel_atomic_state_clear,
        .atomic_state_free = intel_atomic_state_free,
};
15096
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Selects the per-platform implementations of the display vfuncs: pipe
 * config readout, initial (BIOS) plane config readout, clock computation,
 * crtc enable/disable, FDI link training and the crtc update ordering.
 * The branch order matters: newer platforms are matched before the
 * broader checks further down the ladder.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
        intel_init_cdclk_hooks(dev_priv);

        /* Gen9+: HSW-style modeset with SKL-style plane readout. */
        if (INTEL_GEN(dev_priv) >= 9) {
                dev_priv->display.get_pipe_config = haswell_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        skylake_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock =
                        haswell_crtc_compute_clock;
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
        /* HSW/BDW (DDI-based, pre-gen9). */
        } else if (HAS_DDI(dev_priv)) {
                dev_priv->display.get_pipe_config = haswell_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock =
                        haswell_crtc_compute_clock;
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
        /* ILK/SNB/IVB: PCH split platforms without DDI. */
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock =
                        ironlake_crtc_compute_clock;
                dev_priv->display.crtc_enable = ironlake_crtc_enable;
                dev_priv->display.crtc_disable = ironlake_crtc_disable;
        /* CHV/VLV share the gmch-style enable path but differ in PLLs. */
        } else if (IS_CHERRYVIEW(dev_priv)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
                dev_priv->display.crtc_enable = valleyview_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
        } else if (IS_VALLEYVIEW(dev_priv)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
                dev_priv->display.crtc_enable = valleyview_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
        /* Older gmch platforms: only the clock computation differs. */
        } else if (IS_G4X(dev_priv)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
        } else if (IS_PINEVIEW(dev_priv)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
        /* Gen3/4 (everything not matched above that isn't gen2). */
        } else if (!IS_GEN(dev_priv, 2)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
        /* Gen2 fallback. */
        } else {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
                        i9xx_get_initial_plane_config;
                dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
        }

        /* FDI link training, only on platforms with an FDI link. */
        if (IS_GEN(dev_priv, 5)) {
                dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
        } else if (IS_GEN(dev_priv, 6)) {
                dev_priv->display.fdi_link_train = gen6_fdi_link_train;
        } else if (IS_IVYBRIDGE(dev_priv)) {
                /* FIXME: detect B0+ stepping and use auto training */
                dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                dev_priv->display.fdi_link_train = hsw_fdi_link_train;
        }

        /* Crtc update ordering during atomic commits. */
        if (INTEL_GEN(dev_priv) >= 9)
                dev_priv->display.update_crtcs = skl_update_crtcs;
        else
                dev_priv->display.update_crtcs = intel_update_crtcs;
}
15189
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
        struct pci_dev *pdev = dev_priv->drm.pdev;
        u8 sr1;
        i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

        /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
        vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
        /*
         * Read-modify-write sequencer register SR01 through the legacy
         * VGA I/O ports, setting bit 5 (screen-off per the VGA spec)
         * before disabling the VGA plane itself.
         */
        outb(SR01, VGA_SR_INDEX);
        sr1 = inb(VGA_SR_DATA);
        outb(sr1 | 1<<5, VGA_SR_DATA);
        vga_put(pdev, VGA_RSRC_LEGACY_IO);
        /* Give the hardware time to settle before touching VGACNTRL. */
        udelay(300);

        I915_WRITE(vga_reg, VGA_DISP_DISABLE);
        POSTING_READ(vga_reg); /* flush the write */
}
15208
/*
 * Read out the current CDCLK hardware state and adopt it as both our
 * logical and actual software state, so the driver starts out agreeing
 * with whatever the BIOS left programmed.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        intel_update_cdclk(dev_priv);
        intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
        /* Software state starts out mirroring the hardware state. */
        dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
}
15217
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state;
        struct intel_atomic_state *intel_state;
        struct drm_crtc *crtc;
        struct drm_crtc_state *cstate;
        struct drm_modeset_acquire_ctx ctx;
        int ret;
        int i;

        /* Only supported on platforms that use atomic watermark design */
        if (!dev_priv->display.optimize_watermarks)
                return;

        /*
         * We need to hold connection_mutex before calling duplicate_state so
         * that the connector loop is protected.
         */
        drm_modeset_acquire_init(&ctx, 0);
retry:
        /* Standard EDEADLK dance: back off and retry on lock contention. */
        ret = drm_modeset_lock_all_ctx(dev, &ctx);
        if (ret == -EDEADLK) {
                drm_modeset_backoff(&ctx);
                goto retry;
        } else if (WARN_ON(ret)) {
                goto fail;
        }

        /* Duplicate the readout state so the check doesn't disturb it. */
        state = drm_atomic_helper_duplicate_state(dev, &ctx);
        if (WARN_ON(IS_ERR(state)))
                goto fail;

        intel_state = to_intel_atomic_state(state);

        /*
         * Hardware readout is the only time we don't want to calculate
         * intermediate watermarks (since we don't trust the current
         * watermarks).
         */
        if (!HAS_GMCH(dev_priv))
                intel_state->skip_intermediate_wm = true;

        ret = intel_atomic_check(dev, state);
        if (ret) {
                /*
                 * If we fail here, it means that the hardware appears to be
                 * programmed in a way that shouldn't be possible, given our
                 * understanding of watermark requirements.  This might mean a
                 * mistake in the hardware readout code or a mistake in the
                 * watermark calculations for a given platform.  Raise a WARN
                 * so that this is noticeable.
                 *
                 * If this actually happens, we'll have to just leave the
                 * BIOS-programmed watermarks untouched and hope for the best.
                 */
                WARN(true, "Could not determine valid watermarks for inherited state\n");
                goto put_state;
        }

        /* Write calculated watermark values back */
        for_each_new_crtc_in_state(state, crtc, cstate, i) {
                struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

                cs->wm.need_postvbl_update = true;
                dev_priv->display.optimize_watermarks(intel_state, cs);

                /* Copy the computed watermarks into the committed state. */
                to_intel_crtc_state(crtc->state)->wm = cs->wm;
        }

put_state:
        drm_atomic_state_put(state);
fail:
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
}
15304
15305 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15306 {
15307         if (IS_GEN(dev_priv, 5)) {
15308                 u32 fdi_pll_clk =
15309                         I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15310
15311                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
15312         } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
15313                 dev_priv->fdi_pll_freq = 270000;
15314         } else {
15315                 return;
15316         }
15317
15318         DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15319 }
15320
/*
 * Build and commit an atomic state covering every crtc (and, for active
 * crtcs, all their planes) so that software plane state is recomputed
 * right after hardware readout. Returns 0 on success or a negative
 * errno from state construction/commit.
 */
static int intel_initial_commit(struct drm_device *dev)
{
        struct drm_atomic_state *state = NULL;
        struct drm_modeset_acquire_ctx ctx;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        int ret = 0;

        state = drm_atomic_state_alloc(dev);
        if (!state)
                return -ENOMEM;

        drm_modeset_acquire_init(&ctx, 0);

retry:
        state->acquire_ctx = &ctx;

        drm_for_each_crtc(crtc, dev) {
                /* Pull each crtc's state into the commit. */
                crtc_state = drm_atomic_get_crtc_state(state, crtc);
                if (IS_ERR(crtc_state)) {
                        ret = PTR_ERR(crtc_state);
                        goto out;
                }

                if (crtc_state->active) {
                        ret = drm_atomic_add_affected_planes(state, crtc);
                        if (ret)
                                goto out;

                        /*
                         * FIXME hack to force a LUT update to avoid the
                         * plane update forcing the pipe gamma on without
                         * having a proper LUT loaded. Remove once we
                         * have readout for pipe gamma enable.
                         */
                        crtc_state->color_mgmt_changed = true;
                }
        }

        ret = drm_atomic_commit(state);

out:
        /* Standard EDEADLK dance: clear the state, back off and retry. */
        if (ret == -EDEADLK) {
                drm_atomic_state_clear(state);
                drm_modeset_backoff(&ctx);
                goto retry;
        }

        drm_atomic_state_put(state);

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);

        return ret;
}
15376
/*
 * One-time modeset initialization for the whole device: sets up the DRM
 * mode config limits and vfuncs, creates the crtcs, reads out the BIOS
 * hardware state (including any BIOS framebuffer), sanitizes watermarks
 * and finally forces an initial atomic commit. Returns 0 on success or
 * a negative errno if crtc creation fails.
 */
int intel_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        enum pipe pipe;
        struct intel_crtc *crtc;
        int ret;

        /* NOTE(review): allocation result is not checked here — confirm
         * callers tolerate a NULL modeset_wq before adding a bail-out. */
        dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);

        drm_mode_config_init(dev);

        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;

        dev->mode_config.preferred_depth = 24;
        dev->mode_config.prefer_shadow = 1;

        dev->mode_config.allow_fb_modifiers = true;

        dev->mode_config.funcs = &intel_mode_funcs;

        /* Deferred freeing of atomic states (see the free_work worker). */
        init_llist_head(&dev_priv->atomic_helper.free_list);
        INIT_WORK(&dev_priv->atomic_helper.free_work,
                  intel_atomic_helper_free_state_worker);

        intel_init_quirks(dev_priv);

        intel_fbc_init(dev_priv);

        intel_init_pm(dev_priv);

        /*
         * There may be no VBT; and if the BIOS enabled SSC we can
         * just keep using it to avoid unnecessary flicker.  Whereas if the
         * BIOS isn't using it, don't assume it will work even if the VBT
         * indicates as much.
         */
        if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
                bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
                                            DREF_SSC1_ENABLE);

                if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
                        DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
                                     bios_lvds_use_ssc ? "en" : "dis",
                                     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
                        dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
                }
        }

        /* maximum framebuffer dimensions */
        if (IS_GEN(dev_priv, 2)) {
                dev->mode_config.max_width = 2048;
                dev->mode_config.max_height = 2048;
        } else if (IS_GEN(dev_priv, 3)) {
                dev->mode_config.max_width = 4096;
                dev->mode_config.max_height = 4096;
        } else {
                dev->mode_config.max_width = 8192;
                dev->mode_config.max_height = 8192;
        }

        /* Per-platform cursor size limits. */
        if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
                dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
                dev->mode_config.cursor_height = 1023;
        } else if (IS_GEN(dev_priv, 2)) {
                dev->mode_config.cursor_width = 64;
                dev->mode_config.cursor_height = 64;
        } else {
                dev->mode_config.cursor_width = 256;
                dev->mode_config.cursor_height = 256;
        }

        dev->mode_config.fb_base = ggtt->gmadr.start;

        DRM_DEBUG_KMS("%d display pipe%s available.\n",
                      INTEL_INFO(dev_priv)->num_pipes,
                      INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");

        /* Create one crtc per hardware pipe. */
        for_each_pipe(dev_priv, pipe) {
                ret = intel_crtc_init(dev_priv, pipe);
                if (ret) {
                        drm_mode_config_cleanup(dev);
                        return ret;
                }
        }

        intel_shared_dpll_init(dev);
        intel_update_fdi_pll_freq(dev_priv);

        intel_update_czclk(dev_priv);
        intel_modeset_init_hw(dev);

        if (dev_priv->max_cdclk_freq == 0)
                intel_update_max_cdclk(dev_priv);

        /* Just disable it once at startup */
        i915_disable_vga(dev_priv);
        intel_setup_outputs(dev_priv);

        /* Read out and sanitize the BIOS-programmed hardware state. */
        drm_modeset_lock_all(dev);
        intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
        drm_modeset_unlock_all(dev);

        for_each_intel_crtc(dev, crtc) {
                struct intel_initial_plane_config plane_config = {};

                if (!crtc->active)
                        continue;

                /*
                 * Note that reserving the BIOS fb up front prevents us
                 * from stuffing other stolen allocations like the ring
                 * on top.  This prevents some ugliness at boot time, and
                 * can even allow for smooth boot transitions if the BIOS
                 * fb is large enough for the active pipe configuration.
                 */
                dev_priv->display.get_initial_plane_config(crtc,
                                                           &plane_config);

                /*
                 * If the fb is shared between multiple heads, we'll
                 * just get the first one.
                 */
                intel_find_initial_plane_obj(crtc, &plane_config);
        }

        /*
         * Make sure hardware watermarks really match the state we read out.
         * Note that we need to do this after reconstructing the BIOS fb's
         * since the watermark calculation done here will use pstate->fb.
         */
        if (!HAS_GMCH(dev_priv))
                sanitize_watermarks(dev);

        /*
         * Force all active planes to recompute their states. So that on
         * mode_setcrtc after probe, all the intel_plane_state variables
         * are already calculated and there is no assert_plane warnings
         * during bootup.
         */
        ret = intel_initial_commit(dev);
        if (ret)
                DRM_DEBUG_KMS("Initial commit in probe failed.\n");

        return 0;
}
15524
/*
 * Force-enable @pipe with a fixed 640x480@60 mode (used by the "force
 * pipe" quirk on i830 — see the debug message below). Programs the DPLL
 * dividers, the full pipe timings and finally PIPECONF, then waits for
 * the scanline counter to start moving.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
        /* 640x480@60Hz, ~25175 kHz */
        struct dpll clock = {
                .m1 = 18,
                .m2 = 7,
                .p1 = 13,
                .p2 = 4,
                .n = 2,
        };
        u32 dpll, fp;
        int i;

        /* Sanity check: with a 48 MHz refclk these dividers give ~25.154 MHz. */
        WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

        DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
                      pipe_name(pipe), clock.vco, clock.dot);

        /* Preserve only the DVO 2x bit from the current DPLL value. */
        fp = i9xx_dpll_compute_fp(&clock);
        dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
                DPLL_VGA_MODE_DIS |
                ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
                PLL_P2_DIVIDE_BY_4 |
                PLL_REF_INPUT_DREFCLK |
                DPLL_VCO_ENABLE;

        I915_WRITE(FP0(pipe), fp);
        I915_WRITE(FP1(pipe), fp);

        /* Pipe timings for 640x480@60 (registers take value - 1). */
        I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
        I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
        I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
        I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
        I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
        I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
        I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
        I915_WRITE(DPLL(pipe), dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(DPLL(pipe));
        udelay(150);

        /* The pixel multiplier can only be updated once the
         * DPLL is enabled and the clocks are stable.
         *
         * So write it again.
         */
        I915_WRITE(DPLL(pipe), dpll);

        /* We do this three times for luck */
        for (i = 0; i < 3 ; i++) {
                I915_WRITE(DPLL(pipe), dpll);
                POSTING_READ(DPLL(pipe));
                udelay(150); /* wait for warmup */
        }

        I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
        POSTING_READ(PIPECONF(pipe));

        /* Confirm the pipe is actually running before returning. */
        intel_wait_for_pipe_scanline_moving(crtc);
}
15594
/*
 * Undo i830_enable_pipe(): turn off PIPECONF, wait for the scanline
 * counter to stop, then shut down the DPLL (keeping VGA mode disabled).
 * All planes and cursors are expected to be off already (WARNs below).
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

        DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
                      pipe_name(pipe));

        /* Nothing should still be scanning out of this pipe. */
        WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
        WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

        I915_WRITE(PIPECONF(pipe), 0);
        POSTING_READ(PIPECONF(pipe));

        intel_wait_for_pipe_scanline_stopped(crtc);

        /* Pipe is stopped; now it's safe to disable the DPLL. */
        I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
        POSTING_READ(DPLL(pipe));
}
15616
/*
 * On pre-gen4 hardware a primary plane can be scanning out from a pipe
 * other than the one its crtc owns (e.g. as left by the BIOS). Detect
 * that and turn such planes off so our software mapping holds.
 */
static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
        struct intel_crtc *crtc;

        /* Gen4+ doesn't need this fixup. */
        if (INTEL_GEN(dev_priv) >= 4)
                return;

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_plane *plane =
                        to_intel_plane(crtc->base.primary);
                struct intel_crtc *plane_crtc;
                enum pipe pipe;

                /* Skip planes that are not enabled in hardware. */
                if (!plane->get_hw_state(plane, &pipe))
                        continue;

                /* Plane is feeding the pipe we expect — nothing to do. */
                if (pipe == crtc->pipe)
                        continue;

                DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
                              plane->base.base.id, plane->base.name);

                /* Disable it relative to the pipe it's actually on. */
                plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
                intel_plane_disable_noatomic(plane_crtc, plane);
        }
}
15644
/* Return true if at least one encoder is attached to @crtc. */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct intel_encoder *encoder;

        /* The loop body only runs for encoders on this crtc, so entering
         * it even once means an encoder exists. */
        for_each_encoder_on_crtc(dev, &crtc->base, encoder)
                return true;

        return false;
}
15655
/*
 * Return the first connector attached to @encoder, or NULL if none.
 * If multiple connectors share the encoder, only the first is returned.
 */
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
        struct drm_device *dev = encoder->base.dev;
        struct intel_connector *connector;

        for_each_connector_on_encoder(dev, &encoder->base, connector)
                return connector;

        return NULL;
}
15666
15667 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
15668                               enum pipe pch_transcoder)
15669 {
15670         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
15671                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
15672 }
15673
/*
 * Bring a crtc we inherited from the BIOS into a state consistent with
 * how this driver programs the hardware: clear debug frame-start delays,
 * turn off all non-primary planes, reset the pipe bottom color, disable
 * the pipe if it has no encoders, and initialize FIFO underrun tracking.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
                                struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        /* Clear any frame start delays used for debugging left by the BIOS */
        if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
                i915_reg_t reg = PIPECONF(cpu_transcoder);

                I915_WRITE(reg,
                           I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
        }

        if (crtc_state->base.active) {
                struct intel_plane *plane;

                /* Disable everything but the primary plane */
                for_each_intel_plane_on_crtc(dev, crtc, plane) {
                        const struct intel_plane_state *plane_state =
                                to_intel_plane_state(plane->base.state);

                        if (plane_state->base.visible &&
                            plane->base.type != DRM_PLANE_TYPE_PRIMARY)
                                intel_plane_disable_noatomic(crtc, plane);
                }

                /*
                 * Disable any background color set by the BIOS, but enable the
                 * gamma and CSC to match how we program our planes.
                 */
                if (INTEL_GEN(dev_priv) >= 9)
                        I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
                                   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
                                   SKL_BOTTOM_COLOR_CSC_ENABLE);
        }

        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
        if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
                intel_crtc_disable_noatomic(&crtc->base, ctx);

        if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
                /*
                 * We start out with underrun reporting disabled to avoid races.
                 * For correct bookkeeping mark this on active crtcs.
                 *
                 * Also on gmch platforms we dont have any hardware bits to
                 * disable the underrun reporting. Which means we need to start
                 * out with underrun reporting disabled also on inactive pipes,
                 * since otherwise we'll complain about the garbage we read when
                 * e.g. coming up after runtime pm.
                 *
                 * No protection against concurrent access is required - at
                 * worst a fifo underrun happens which also sets this to false.
                 */
                crtc->cpu_fifo_underrun_disabled = true;
                /*
                 * We track the PCH trancoder underrun reporting state
                 * within the crtc. With crtc for pipe A housing the underrun
                 * reporting state for PCH transcoder A, crtc for pipe B housing
                 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
                 * and marking underrun reporting as disabled for the non-existing
                 * PCH transcoders B and C would prevent enabling the south
                 * error interrupt (see cpt_can_enable_serr_int()).
                 */
                if (has_pch_trancoder(dev_priv, crtc->pipe))
                        crtc->pch_fifo_underrun_disabled = true;
        }
}
15746
15747 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
15748 {
15749         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
15750
15751         /*
15752          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
15753          * the hardware when a high res displays plugged in. DPLL P
15754          * divider is zero, and the pipe timings are bonkers. We'll
15755          * try to disable everything in that case.
15756          *
15757          * FIXME would be nice to be able to sanitize this state
15758          * without several WARNs, but for now let's take the easy
15759          * road.
15760          */
15761         return IS_GEN(dev_priv, 6) &&
15762                 crtc_state->base.active &&
15763                 crtc_state->shared_dpll &&
15764                 crtc_state->port_clock == 0;
15765 }
15766
/*
 * Sanitize a single encoder after HW state readout: if the encoder claims
 * active connectors but has no active pipe behind it (e.g. fallout from
 * resume register restore, or a bogus BIOS DPLL config), manually run the
 * encoder's disable hooks and clamp the connector state to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	/* crtc_state is NULL for encoders not linked to any CRTC */
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->base.active;

	/* Treat a CRTC with a BIOS-misprogrammed DPLL as inactive so the
	 * encoder gets disabled below. */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			      pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			if (encoder->disable)
				encoder->disable(encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, crtc_state,
						      connector->base.state);

			/* restore the original best_encoder pointer */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	/* gen11+ also needs the encoder/PLL mapping sanitized */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
15833
15834 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
15835 {
15836         i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15837
15838         if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15839                 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15840                 i915_disable_vga(dev_priv);
15841         }
15842 }
15843
15844 void i915_redisable_vga(struct drm_i915_private *dev_priv)
15845 {
15846         intel_wakeref_t wakeref;
15847
15848         /*
15849          * This function can be called both from intel_modeset_setup_hw_state or
15850          * at a very early point in our resume sequence, where the power well
15851          * structures are not yet restored. Since this function is at a very
15852          * paranoid "someone might have enabled VGA while we were not looking"
15853          * level, just check if the power well is enabled instead of trying to
15854          * follow the "don't touch the power well if we don't need it" policy
15855          * the rest of the driver uses.
15856          */
15857         wakeref = intel_display_power_get_if_enabled(dev_priv,
15858                                                      POWER_DOMAIN_VGA);
15859         if (!wakeref)
15860                 return;
15861
15862         i915_redisable_vga_power_on(dev_priv);
15863
15864         intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
15865 }
15866
/* FIXME read out full plane state for all planes */
/*
 * Read back each plane's hw enable state and the pipe it is attached to,
 * record the visibility in the owning CRTC's state, then fix up the
 * active_planes bitmasks for every CRTC.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		/* default in case get_hw_state() reports the plane disabled */
		enum pipe pipe = PIPE_A;
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		/* attribute the plane to whichever pipe the hw says it's on */
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			      plane->base.base.id, plane->base.name,
			      enableddisabled(visible), pipe_name(pipe));
	}

	/* second pass: reconcile each CRTC's active plane bookkeeping */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_active_planes(crtc_state);
	}
}
15899
/*
 * Read back the full display hw state into the software state structures:
 * CRTCs, planes, shared DPLLs, encoders and connectors, in that order.
 * Afterwards the sw state mirrors whatever the BIOS/firmware left enabled,
 * ready to be sanitized by intel_modeset_setup_hw_state().
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_crtcs = 0;

	/* CRTCs: wipe the sw state and read the pipe config from hw. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active)
			dev_priv->active_crtcs |= 1 << crtc->pipe;

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->base.active));
	}

	readout_plane_state(dev_priv);

	/* Shared DPLLs: read hw state and rebuild the crtc usage masks. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							&pll->state.hw_state);
		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->base.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

	/* Encoders: link each enabled encoder to its pipe's CRTC. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	/* Connectors: link active connectors to their encoder and CRTC. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					drm_connector_mask(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive the remaining per-CRTC sw state from the readout. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		int min_cdclk = 0;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc_state->base.active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
			crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
			crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
			intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			if (dev_priv->display.modeset_calc_cdclk) {
				min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
				if (WARN_ON(min_cdclk < 0))
					min_cdclk = 0;
			}

			drm_calc_timestamping_constants(&crtc->base,
							&crtc_state->base.adjusted_mode);
			update_scanline_offset(crtc_state);
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
16052
16053 static void
16054 get_encoder_power_domains(struct drm_i915_private *dev_priv)
16055 {
16056         struct intel_encoder *encoder;
16057
16058         for_each_intel_encoder(&dev_priv->drm, encoder) {
16059                 u64 get_domains;
16060                 enum intel_display_power_domain domain;
16061                 struct intel_crtc_state *crtc_state;
16062
16063                 if (!encoder->get_power_domains)
16064                         continue;
16065
16066                 /*
16067                  * MST-primary and inactive encoders don't have a crtc state
16068                  * and neither of these require any power domain references.
16069                  */
16070                 if (!encoder->base.crtc)
16071                         continue;
16072
16073                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
16074                 get_domains = encoder->get_power_domains(encoder, crtc_state);
16075                 for_each_power_domain(domain, get_domains)
16076                         intel_display_power_get(dev_priv, domain);
16077         }
16078 }
16079
16080 static void intel_early_display_was(struct drm_i915_private *dev_priv)
16081 {
16082         /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
16083         if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
16084                 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
16085                            DARBF_GATING_DIS);
16086
16087         if (IS_HASWELL(dev_priv)) {
16088                 /*
16089                  * WaRsPkgCStateDisplayPMReq:hsw
16090                  * System hang if this isn't done before disabling all planes!
16091                  */
16092                 I915_WRITE(CHICKEN_PAR1_1,
16093                            I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
16094         }
16095 }
16096
16097 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
16098                                        enum port port, i915_reg_t hdmi_reg)
16099 {
16100         u32 val = I915_READ(hdmi_reg);
16101
16102         if (val & SDVO_ENABLE ||
16103             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
16104                 return;
16105
16106         DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
16107                       port_name(port));
16108
16109         val &= ~SDVO_PIPE_SEL_MASK;
16110         val |= SDVO_PIPE_SEL(PIPE_A);
16111
16112         I915_WRITE(hdmi_reg, val);
16113 }
16114
16115 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
16116                                      enum port port, i915_reg_t dp_reg)
16117 {
16118         u32 val = I915_READ(dp_reg);
16119
16120         if (val & DP_PORT_EN ||
16121             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
16122                 return;
16123
16124         DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
16125                       port_name(port));
16126
16127         val &= ~DP_PIPE_SEL_MASK;
16128         val |= DP_PIPE_SEL(PIPE_A);
16129
16130         I915_WRITE(dp_reg, val);
16131 }
16132
/* Sanitize the transcoder select bits on all IBX PCH DP/HDMI ports. */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even if it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
16155
/*
 * Scan out the current hw modeset state and sanitize it to the current
 * state: read back everything via intel_modeset_readout_hw_state(), then
 * fix up anything inconsistent (ports, encoders, CRTCs, DPLLs, watermarks)
 * so that later atomic commits start from a coherent software state.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	/* Keep all power wells up for the duration of the readout/sanitize. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->base.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	/* Sanitize encoders before CRTCs (encoder disable may touch pipes). */
	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);
		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc, crtc_state,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any shared DPLL left running without a user. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read back (and where needed sanitize) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/* Per-CRTC power domains should all have been balanced by now. */
	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		crtc_state = to_intel_crtc_state(crtc->base.state);
		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	intel_fbc_init_pipe_state(dev_priv);
}
16246
/*
 * Restore the display state saved at suspend time (if any): take all the
 * modeset locks (retrying on deadlock) and replay the saved atomic state.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	/* Claim the saved state so a concurrent resume can't reuse it. */
	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Standard w/w locking dance: back off and retry on deadlock. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);
	/* Drop our reference; frees the state when the last ref goes. */
	if (state)
		drm_atomic_state_put(state);
}
16280
16281 static void intel_hpd_poll_fini(struct drm_device *dev)
16282 {
16283         struct intel_connector *connector;
16284         struct drm_connector_list_iter conn_iter;
16285
16286         /* Kill all the work that may have been queued by hpd. */
16287         drm_connector_list_iter_begin(dev, &conn_iter);
16288         for_each_intel_connector_iter(connector, &conn_iter) {
16289                 if (connector->modeset_retry_work.func)
16290                         cancel_work_sync(&connector->modeset_retry_work);
16291                 if (connector->hdcp.shim) {
16292                         cancel_delayed_work_sync(&connector->hdcp.check_work);
16293                         cancel_work_sync(&connector->hdcp.prop_work);
16294                 }
16295         }
16296         drm_connector_list_iter_end(&conn_iter);
16297 }
16298
/*
 * Tear down the modeset state on driver unload. The ordering here is
 * deliberate and fragile — each step's comment explains why it must come
 * before the next.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	flush_workqueue(dev_priv->modeset_wq);

	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(dev);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(dev_priv);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_overlay_cleanup(dev_priv);

	intel_teardown_gmbus(dev_priv);

	destroy_workqueue(dev_priv->modeset_wq);

	intel_fbc_cleanup_cfb(dev_priv);
}
16341
16342 /*
16343  * set vga decode state - true == enable VGA decode
16344  */
16345 int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
16346 {
16347         unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16348         u16 gmch_ctrl;
16349
16350         if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16351                 DRM_ERROR("failed to read control word\n");
16352                 return -EIO;
16353         }
16354
16355         if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16356                 return 0;
16357
16358         if (state)
16359                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16360         else
16361                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16362
16363         if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16364                 DRM_ERROR("failed to write control word\n");
16365                 return -EIO;
16366         }
16367
16368         return 0;
16369 }
16370
16371 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16372
/*
 * Snapshot of display register state captured at GPU error time by
 * intel_display_capture_error_state() and printed by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	/* HSW_PWR_WELL_CTL2 on HSW/BDW, 0 elsewhere */
	u32 power_well_driver;

	/* number of valid entries in transcoder[] below */
	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		/* NOTE(review): never filled in by the capture code in this
		 * file (stays 0 from kzalloc) — candidate for removal */
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* false when the pipe's power domain was off; the register
		 * fields below are then left zeroed */
		bool power_domain_on;
		u32 source;
		u32 stat;	/* only captured on GMCH platforms */
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;	/* gen <= 3 only */
		u32 pos;	/* gen <= 3 only */
		u32 addr;	/* gen <= 7, except HSW */
		u32 surface;	/* gen >= 4 only */
		u32 tile_offset; /* gen >= 4 only */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		/* pipe timing registers */
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
16416
16417 struct intel_display_error_state *
16418 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16419 {
16420         struct intel_display_error_state *error;
16421         int transcoders[] = {
16422                 TRANSCODER_A,
16423                 TRANSCODER_B,
16424                 TRANSCODER_C,
16425                 TRANSCODER_EDP,
16426         };
16427         int i;
16428
16429         if (!HAS_DISPLAY(dev_priv))
16430                 return NULL;
16431
16432         error = kzalloc(sizeof(*error), GFP_ATOMIC);
16433         if (error == NULL)
16434                 return NULL;
16435
16436         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16437                 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
16438
16439         for_each_pipe(dev_priv, i) {
16440                 error->pipe[i].power_domain_on =
16441                         __intel_display_power_is_enabled(dev_priv,
16442                                                          POWER_DOMAIN_PIPE(i));
16443                 if (!error->pipe[i].power_domain_on)
16444                         continue;
16445
16446                 error->cursor[i].control = I915_READ(CURCNTR(i));
16447                 error->cursor[i].position = I915_READ(CURPOS(i));
16448                 error->cursor[i].base = I915_READ(CURBASE(i));
16449
16450                 error->plane[i].control = I915_READ(DSPCNTR(i));
16451                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16452                 if (INTEL_GEN(dev_priv) <= 3) {
16453                         error->plane[i].size = I915_READ(DSPSIZE(i));
16454                         error->plane[i].pos = I915_READ(DSPPOS(i));
16455                 }
16456                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16457                         error->plane[i].addr = I915_READ(DSPADDR(i));
16458                 if (INTEL_GEN(dev_priv) >= 4) {
16459                         error->plane[i].surface = I915_READ(DSPSURF(i));
16460                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16461                 }
16462
16463                 error->pipe[i].source = I915_READ(PIPESRC(i));
16464
16465                 if (HAS_GMCH(dev_priv))
16466                         error->pipe[i].stat = I915_READ(PIPESTAT(i));
16467         }
16468
16469         /* Note: this does not include DSI transcoders. */
16470         error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
16471         if (HAS_DDI(dev_priv))
16472                 error->num_transcoders++; /* Account for eDP. */
16473
16474         for (i = 0; i < error->num_transcoders; i++) {
16475                 enum transcoder cpu_transcoder = transcoders[i];
16476
16477                 error->transcoder[i].power_domain_on =
16478                         __intel_display_power_is_enabled(dev_priv,
16479                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16480                 if (!error->transcoder[i].power_domain_on)
16481                         continue;
16482
16483                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16484
16485                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16486                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16487                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16488                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16489                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16490                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16491                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16492         }
16493
16494         return error;
16495 }
16496
/* Shorthand for printing into the i915 error state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16498
16499 void
16500 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
16501                                 struct intel_display_error_state *error)
16502 {
16503         struct drm_i915_private *dev_priv = m->i915;
16504         int i;
16505
16506         if (!error)
16507                 return;
16508
16509         err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
16510         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16511                 err_printf(m, "PWR_WELL_CTL2: %08x\n",
16512                            error->power_well_driver);
16513         for_each_pipe(dev_priv, i) {
16514                 err_printf(m, "Pipe [%d]:\n", i);
16515                 err_printf(m, "  Power: %s\n",
16516                            onoff(error->pipe[i].power_domain_on));
16517                 err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
16518                 err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
16519
16520                 err_printf(m, "Plane [%d]:\n", i);
16521                 err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
16522                 err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
16523                 if (INTEL_GEN(dev_priv) <= 3) {
16524                         err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
16525                         err_printf(m, "  POS: %08x\n", error->plane[i].pos);
16526                 }
16527                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16528                         err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
16529                 if (INTEL_GEN(dev_priv) >= 4) {
16530                         err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
16531                         err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
16532                 }
16533
16534                 err_printf(m, "Cursor [%d]:\n", i);
16535                 err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
16536                 err_printf(m, "  POS: %08x\n", error->cursor[i].position);
16537                 err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
16538         }
16539
16540         for (i = 0; i < error->num_transcoders; i++) {
16541                 err_printf(m, "CPU transcoder: %s\n",
16542                            transcoder_name(error->transcoder[i].cpu_transcoder));
16543                 err_printf(m, "  Power: %s\n",
16544                            onoff(error->transcoder[i].power_domain_on));
16545                 err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
16546                 err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
16547                 err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
16548                 err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
16549                 err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
16550                 err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
16551                 err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
16552         }
16553 }
16554
16555 #endif